Diffstat (limited to 'drivers')
 drivers/acpi/Kconfig | 11
 drivers/acpi/bus.c | 3
 drivers/acpi/ec.c | 6
 drivers/acpi/ibm_acpi.c | 13
 drivers/acpi/processor_core.c | 3
 drivers/acpi/processor_perflib.c | 4
 drivers/acpi/toshiba_acpi.c | 9
 drivers/ata/Kconfig | 2
 drivers/ata/pata_hpt37x.c | 6
 drivers/block/pktcdvd.c | 49
 drivers/bluetooth/hci_usb.c | 7
 drivers/cdrom/cdrom.c | 8
 drivers/char/agp/agp.h | 4
 drivers/char/agp/amd64-agp.c | 9
 drivers/char/agp/generic.c | 11
 drivers/char/agp/intel-agp.c | 172
 drivers/char/agp/sgi-agp.c | 9
 drivers/char/drm/i915_irq.c | 2
 drivers/char/hw_random/amd-rng.c | 2
 drivers/char/hw_random/geode-rng.c | 2
 drivers/char/hw_random/intel-rng.c | 34
 drivers/char/hw_random/ixp4xx-rng.c | 2
 drivers/char/hw_random/via-rng.c | 2
 drivers/char/ip2/i2ellis.h | 4
 drivers/char/mem.c | 4
 drivers/connector/cn_proc.c | 11
 drivers/cpufreq/cpufreq_stats.c | 4
 drivers/hid/Kconfig | 18
 drivers/hid/hid-core.c | 18
 drivers/hid/hid-input.c | 29
 drivers/hwmon/hwmon-vid.c | 2
 drivers/hwmon/w83793.c | 127
 drivers/i2c/busses/Kconfig | 9
 drivers/i2c/busses/i2c-mv64xxx.c | 4
 drivers/i2c/busses/i2c-pnx.c | 7
 drivers/i2c/chips/m41t00.c | 1
 drivers/i2c/i2c-core.c | 28
 drivers/ide/pci/atiixp.c | 18
 drivers/ide/pci/via82cxxx.c | 138
 drivers/ieee1394/sbp2.c | 2
 drivers/infiniband/core/cma.c | 17
 drivers/infiniband/core/ucma.c | 11
 drivers/infiniband/hw/ehca/ehca_hca.c | 8
 drivers/infiniband/hw/ehca/ehca_irq.c | 2
 drivers/infiniband/hw/ehca/ehca_iverbs.h | 4
 drivers/infiniband/hw/ehca/ehca_main.c | 10
 drivers/infiniband/hw/ehca/ehca_mrmw.c | 4
 drivers/infiniband/hw/ehca/ehca_qp.c | 4
 drivers/infiniband/hw/mthca/mthca_cq.c | 8
 drivers/infiniband/hw/mthca/mthca_memfree.c | 2
 drivers/infiniband/hw/mthca/mthca_qp.c | 26
 drivers/infiniband/ulp/iser/iscsi_iser.c | 4
 drivers/infiniband/ulp/iser/iser_initiator.c | 26
 drivers/kvm/kvm.h | 106
 drivers/kvm/kvm_main.c | 155
 drivers/kvm/mmu.c | 1114
 drivers/kvm/paging_tmpl.h | 260
 drivers/kvm/svm.c | 121
 drivers/kvm/vmx.c | 182
 drivers/kvm/x86_emulate.c | 2
 drivers/leds/leds-s3c24xx.c | 2
 drivers/macintosh/via-pmu.c | 1
 drivers/md/raid1.c | 13
 drivers/md/raid10.c | 11
 drivers/media/video/cx88/cx88-tvaudio.c | 2
 drivers/media/video/ks0127.c | 8
 drivers/media/video/saa7134/saa7134-cards.c | 14
 drivers/media/video/tveeprom.c | 2
 drivers/media/video/usbvideo/quickcam_messenger.h | 14
 drivers/media/video/usbvision/usbvision-video.c | 3
 drivers/media/video/v4l2-common.c | 9
 drivers/media/video/video-buf.c | 2
 drivers/media/video/vivi.c | 7
 drivers/mmc/at91_mci.c | 11
 drivers/mmc/imxmmc.c | 3
 drivers/mmc/mmci.c | 4
 drivers/mmc/omap.c | 21
 drivers/mmc/pxamci.c | 2
 drivers/mmc/tifm_sd.c | 3
 drivers/net/Space.c | 11
 drivers/net/bnx2.c | 75
 drivers/net/chelsio/my3126.c | 5
 drivers/net/e1000/e1000_main.c | 6
 drivers/net/forcedeth.c | 111
 drivers/net/ifb.c | 4
 drivers/net/ixgb/ixgb.h | 1
 drivers/net/ixgb/ixgb_ethtool.c | 1
 drivers/net/ixgb/ixgb_hw.c | 3
 drivers/net/ixgb/ixgb_main.c | 57
 drivers/net/loopback.c | 4
 drivers/net/pcmcia/3c574_cs.c | 1
 drivers/net/pcmcia/3c589_cs.c | 1
 drivers/net/pcmcia/com20020_cs.c | 1
 drivers/net/pcmcia/pcnet_cs.c | 2
 drivers/net/pcmcia/xirc2ps_cs.c | 1
 drivers/net/qla3xxx.c | 38
 drivers/net/sungem.c | 3
 drivers/net/sungem_phy.c | 179
 drivers/net/sungem_phy.h | 7
 drivers/net/tg3.c | 17
 drivers/net/tg3.h | 4
 drivers/net/wireless/ipw2100.c | 2
 drivers/net/wireless/netwave_cs.c | 1
 drivers/net/wireless/ray_cs.c | 1
 drivers/net/wireless/wl3501_cs.c | 1
 drivers/pci/Kconfig | 2
 drivers/pci/pci.c | 3
 drivers/pci/quirks.c | 16
 drivers/pci/search.c | 24
 drivers/rtc/rtc-at91rm9200.c | 2
 drivers/rtc/rtc-rs5c372.c | 535
 drivers/rtc/rtc-sh.c | 4
 drivers/s390/char/vmcp.c | 2
 drivers/s390/cio/cio.c | 12
 drivers/s390/net/Kconfig | 5
 drivers/s390/net/qeth.h | 2
 drivers/s390/net/qeth_main.c | 217
 drivers/scsi/pcmcia/nsp_cs.c | 1
 drivers/scsi/pcmcia/sym53c500_cs.c | 1
 drivers/serial/8250.c | 2
 drivers/serial/mpc52xx_uart.c | 2
 drivers/usb/class/usblp.c | 1
 drivers/usb/core/endpoint.c | 2
 drivers/usb/gadget/omap_udc.c | 245
 drivers/usb/gadget/omap_udc.h | 3
 drivers/usb/host/uhci-hcd.c | 20
 drivers/usb/input/Kconfig | 8
 drivers/usb/input/hid-core.c | 40
 drivers/usb/input/hid-ff.c | 5
 drivers/usb/input/hiddev.c | 2
 drivers/usb/input/usbhid.h | 3
 drivers/usb/misc/sisusbvga/sisusb_con.c | 12
 drivers/usb/net/asix.c | 2
 drivers/usb/serial/funsoft.c | 2
 drivers/usb/serial/option.c | 3
 drivers/usb/storage/unusual_devs.h | 17
 drivers/video/backlight/corgi_bl.c | 2
 drivers/video/backlight/hp680_bl.c | 2
 drivers/video/backlight/locomolcd.c | 2
 139 files changed, 3459 insertions(+), 1271 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1639998e4d27..f4f000abc4e9 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -225,17 +225,6 @@ config ACPI_IBM_DOCK
 
	  If you are not sure, say N here.
 
-config ACPI_IBM_BAY
-	bool "Legacy Removable Bay Support"
-	depends on ACPI_IBM
-	depends on ACPI_BAY=n
-	default n
-	---help---
-	  Allows the ibm_acpi driver to handle removable bays.
-	  This support is obsoleted by CONFIG_ACPI_BAY.
-
-	  If you are not sure, say N here.
-
 config ACPI_TOSHIBA
 	tristate "Toshiba Laptop Extras"
 	depends on X86
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 279c4bac92e5..766332e45592 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -561,6 +561,9 @@ static int __init acpi_bus_init_irq(void)
 	case ACPI_IRQ_MODEL_IOSAPIC:
 		message = "IOSAPIC";
 		break;
+	case ACPI_IRQ_MODEL_PLATFORM:
+		message = "platform specific model";
+		break;
 	default:
 		printk(KERN_WARNING PREFIX "Unknown interrupt routing model\n");
 		return -ENODEV;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 9c52d87d6f04..cbdf031f3c09 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -424,7 +424,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 
 	snprintf(object_name, 8, "_Q%2.2X", value);
 
-	printk(KERN_INFO PREFIX "evaluating %s\n", object_name);
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s", object_name));
 
 	acpi_evaluate_object(ec->handle, object_name, NULL, NULL);
 }
@@ -1016,8 +1016,8 @@ static int __init acpi_ec_set_intr_mode(char *str)
 		acpi_ec_mode = EC_POLL;
 	}
 	acpi_ec_driver.ops.add = acpi_ec_add;
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "EC %s mode.\n",
-			  intr ? "interrupt" : "polling"));
+	printk(KERN_NOTICE PREFIX "%s mode.\n",
+	       intr ? "interrupt" : "polling");
 
 	return 1;
 }
diff --git a/drivers/acpi/ibm_acpi.c b/drivers/acpi/ibm_acpi.c
index b72d13d11a27..c6144ca66638 100644
--- a/drivers/acpi/ibm_acpi.c
+++ b/drivers/acpi/ibm_acpi.c
@@ -157,7 +157,6 @@ IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
157 "\\_SB.PCI.ISA.SLCE", /* 570 */ 157 "\\_SB.PCI.ISA.SLCE", /* 570 */
158 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */ 158 ); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
159#endif 159#endif
160#ifdef CONFIG_ACPI_IBM_BAY
161IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */ 160IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
162 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */ 161 "\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
163 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */ 162 "\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
@@ -175,7 +174,6 @@ IBM_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
 IBM_HANDLE(bay2_ej, bay2, "_EJ3",	/* 600e/x, 770e, A3x */
 	   "_EJ0",			/* 770x */
 	);				/* all others */
-#endif
 
 /* don't list other alternatives as we install a notify handler on the 570 */
 IBM_HANDLE(pci, root, "\\_SB.PCI");	/* 570 */
@@ -1042,7 +1040,6 @@ static int light_write(char *buf)
 	return 0;
 }
 
-#if defined(CONFIG_ACPI_IBM_DOCK) || defined(CONFIG_ACPI_IBM_BAY)
 static int _sta(acpi_handle handle)
 {
 	int status;
@@ -1052,7 +1049,7 @@ static int _sta(acpi_handle handle)
 
 	return status;
 }
-#endif
+
 #ifdef CONFIG_ACPI_IBM_DOCK
 #define dock_docked()	(_sta(dock_handle) & 1)
 
@@ -1118,7 +1115,6 @@ static void dock_notify(struct ibm_struct *ibm, u32 event)
 }
 #endif
 
-#ifdef CONFIG_ACPI_IBM_BAY
 static int bay_status_supported;
 static int bay_status2_supported;
 static int bay_eject_supported;
@@ -1194,7 +1190,6 @@ static void bay_notify(struct ibm_struct *ibm, u32 event)
 {
 	acpi_bus_generate_event(ibm->device, event, 0);
 }
-#endif
 
 static int cmos_read(char *p)
 {
@@ -2354,7 +2349,6 @@ static struct ibm_struct ibms[] = {
 	 .type = ACPI_SYSTEM_NOTIFY,
 	 },
 #endif
-#ifdef CONFIG_ACPI_IBM_BAY
 	{
 	 .name = "bay",
 	 .init = bay_init,
@@ -2364,7 +2358,6 @@ static struct ibm_struct ibms[] = {
 	 .handle = &bay_handle,
 	 .type = ACPI_SYSTEM_NOTIFY,
 	 },
-#endif
 	{
 	 .name = "cmos",
 	 .read = cmos_read,
@@ -2650,9 +2643,7 @@ IBM_PARAM(light);
 #ifdef CONFIG_ACPI_IBM_DOCK
 IBM_PARAM(dock);
 #endif
-#ifdef CONFIG_ACPI_IBM_BAY
 IBM_PARAM(bay);
-#endif
 IBM_PARAM(cmos);
 IBM_PARAM(led);
 IBM_PARAM(beep);
@@ -2735,14 +2726,12 @@ static int __init acpi_ibm_init(void)
 	IBM_HANDLE_INIT(dock);
 #endif
 	IBM_HANDLE_INIT(pci);
-#ifdef CONFIG_ACPI_IBM_BAY
 	IBM_HANDLE_INIT(bay);
 	if (bay_handle)
 		IBM_HANDLE_INIT(bay_ej);
 	IBM_HANDLE_INIT(bay2);
 	if (bay2_handle)
 		IBM_HANDLE_INIT(bay2_ej);
-#endif
 	IBM_HANDLE_INIT(beep);
 	IBM_HANDLE_INIT(ecrd);
 	IBM_HANDLE_INIT(ecwr);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 25718fed39f1..5f9496d59ed6 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -476,9 +476,6 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
 	if (cpu_index == -1) {
 		if (ACPI_FAILURE
 		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
-			printk(KERN_ERR PREFIX
-			       "Getting cpuindex for acpiid 0x%x\n",
-			       pr->acpi_id);
 			return -ENODEV;
 		}
 	}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0e60382714bb..5207f9e4b443 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -736,10 +736,6 @@ int acpi_processor_preregister_performance(
 	}
 
 err_ret:
-	if (retval) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
-	}
-
 	for_each_possible_cpu(i) {
 		pr = processors[i];
 		if (!pr || !pr->performance)
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 88aeccbafaaf..d9b651ffcdc0 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -321,13 +321,16 @@ static int set_lcd_status(struct backlight_device *bd)
 static unsigned long write_lcd(const char *buffer, unsigned long count)
 {
 	int value;
-	int ret = count;
+	int ret;
 
 	if (sscanf(buffer, " brightness : %i", &value) == 1 &&
-	    value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS)
+	    value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
 		ret = set_lcd(value);
-	else
+		if (ret == 0)
+			ret = count;
+	} else {
 		ret = -EINVAL;
+	}
 	return ret;
 }
 
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index b34e0a958d0f..da21552d2b1c 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -381,7 +381,7 @@ config PATA_OPTI
 	  If unsure, say N.
 
 config PATA_OPTIDMA
-	tristate "OPTI FireStar PATA support (Veyr Experimental)"
+	tristate "OPTI FireStar PATA support (Very Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables DMA/PIO support for the later OPTi
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 47082df7199e..dfb306057cf4 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt37x"
-#define DRV_VERSION	"0.5.1"
+#define DRV_VERSION	"0.5.2"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -416,7 +416,7 @@ static const char *bad_ata100_5[] = {
 
 static unsigned long hpt370_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
 {
-	if (adev->class != ATA_DEV_ATA) {
+	if (adev->class == ATA_DEV_ATA) {
 		if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
 			mask &= ~ATA_MASK_UDMA;
 		if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5))
@@ -749,7 +749,7 @@ static void hpt37x_bmdma_stop(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	int mscreg = 0x50 + 2 * ap->port_no;
+	int mscreg = 0x50 + 4 * ap->port_no;
 	u8 bwsr_stat, msc_stat;
 
 	pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7c95c762950f..62462190e07e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -765,47 +765,34 @@ static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio
  */
 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 {
-	char sense[SCSI_SENSE_BUFFERSIZE];
-	request_queue_t *q;
+	request_queue_t *q = bdev_get_queue(pd->bdev);
 	struct request *rq;
-	DECLARE_COMPLETION_ONSTACK(wait);
-	int err = 0;
+	int ret = 0;
 
-	q = bdev_get_queue(pd->bdev);
+	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+			     WRITE : READ, __GFP_WAIT);
+
+	if (cgc->buflen) {
+		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
+			goto out;
+	}
+
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
+		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 
-	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
-			     __GFP_WAIT);
-	rq->errors = 0;
-	rq->rq_disk = pd->bdev->bd_disk;
-	rq->bio = NULL;
-	rq->buffer = NULL;
 	rq->timeout = 60*HZ;
-	rq->data = cgc->buffer;
-	rq->data_len = cgc->buflen;
-	rq->sense = sense;
-	memset(sense, 0, sizeof(sense));
-	rq->sense_len = 0;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_HARDBARRIER;
 	if (cgc->quiet)
 		rq->cmd_flags |= REQ_QUIET;
-	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
-	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
-		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
-	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
-
-	rq->ref_count++;
-	rq->end_io_data = &wait;
-	rq->end_io = blk_end_sync_rq;
-	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
-	generic_unplug_device(q);
-	wait_for_completion(&wait);
-
-	if (rq->errors)
-		err = -EIO;
 
+	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
+	ret = rq->errors;
+out:
 	blk_put_request(rq);
-	return err;
+	return ret;
 }
 
 /*
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index aeefec97fdee..6bdf593081d8 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -117,10 +117,17 @@ static struct usb_device_id blacklist_ids[] = {
 
 	/* IBM/Lenovo ThinkPad with Broadcom chip */
 	{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
+	{ USB_DEVICE(0x0a5c, 0x2110), .driver_info = HCI_WRONG_SCO_MTU },
 
 	/* ANYCOM Bluetooth USB-200 and USB-250 */
 	{ USB_DEVICE(0x0a5c, 0x2111), .driver_info = HCI_RESET },
 
+	/* HP laptop with Broadcom chip */
+	{ USB_DEVICE(0x03f0, 0x171d), .driver_info = HCI_WRONG_SCO_MTU },
+
+	/* Dell laptop with Broadcom chip */
+	{ USB_DEVICE(0x413c, 0x8126), .driver_info = HCI_WRONG_SCO_MTU },
+
 	/* Microsoft Wireless Transceiver for Bluetooth 2.0 */
 	{ USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
 
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 66d028d30439..3105dddf59f1 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -337,6 +337,12 @@ static const char *mrw_address_space[] = { "DMA", "GAA" };
 /* used in the audio ioctls */
 #define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret
 
+/*
+ * Another popular OS uses 7 seconds as the hard timeout for default
+ * commands, so it is a good choice for us as well.
+ */
+#define CDROM_DEF_TIMEOUT	(7 * HZ)
+
 /* Not-exported routines. */
 static int open_for_data(struct cdrom_device_info * cdi);
 static int check_for_audio_disc(struct cdrom_device_info * cdi,
@@ -1528,7 +1534,7 @@ void init_cdrom_command(struct packet_command *cgc, void *buf, int len,
 	cgc->buffer = (char *) buf;
 	cgc->buflen = len;
 	cgc->data_direction = type;
-	cgc->timeout = 5*HZ;
+	cgc->timeout = CDROM_DEF_TIMEOUT;
 }
 
 /* DVD handling */
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 8b3317fd46c9..1d59e2a5b9aa 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -225,6 +225,10 @@ struct agp_bridge_data {
 #define I810_GMS_DISABLE	0x00000000
 #define I810_PGETBL_CTL		0x2020
 #define I810_PGETBL_ENABLED	0x00000001
+#define I965_PGETBL_SIZE_MASK	0x0000000e
+#define I965_PGETBL_SIZE_512KB	(0 << 1)
+#define I965_PGETBL_SIZE_256KB	(1 << 1)
+#define I965_PGETBL_SIZE_128KB	(2 << 1)
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
 #define I810_DRAM_ROW_0_SDRAM	0x00000001
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 2f2c4efff8a3..979300405c0e 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -650,6 +650,15 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 	.subvendor	= PCI_ANY_ID,
 	.subdevice	= PCI_ANY_ID,
 	},
+	/* VIA K8M890 / K8N890 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_VIA,
+	.device		= PCI_DEVICE_ID_VIA_K8M890CE,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
 	/* VIA K8T890 */
 	{
 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 883a36a27833..3491d6f84bc6 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -965,6 +965,9 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	if (!bridge)
 		return -EINVAL;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	temp = bridge->current_size;
 
 	switch (bridge->driver->size_type) {
@@ -1016,8 +1019,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
-		readl(bridge->gatt_table+j);	/* PCI Posting. */
 	}
+	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
 
 	bridge->driver->tlb_flush(mem);
 	return 0;
@@ -1034,6 +1037,9 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	if (!bridge)
 		return -EINVAL;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	if (type != 0 || mem->type != 0) {
 		/* The generic routines know nothing of memory types */
 		return -EINVAL;
@@ -1042,10 +1048,9 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		writel(bridge->scratch_page, bridge->gatt_table+i);
-		readl(bridge->gatt_table+i);	/* PCI Posting. */
 	}
+	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
 
-	global_cache_flush();
 	bridge->driver->tlb_flush(mem);
 	return 0;
 }
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 555b3a8ab49c..ab0a9c0ad7c0 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -207,6 +207,9 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	int i, j, num_entries;
 	void *temp;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
 
@@ -221,12 +224,16 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	if (type != 0 || mem->type != 0) {
 		if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
 			/* special insert */
-			global_cache_flush();
+			if (!mem->is_flushed) {
+				global_cache_flush();
+				mem->is_flushed = TRUE;
+			}
+
 			for (i = pg_start; i < (pg_start + mem->page_count); i++) {
 				writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
-				readl(intel_i810_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
 			}
-			global_cache_flush();
+			readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
+
 			agp_bridge->driver->tlb_flush(mem);
 			return 0;
 		}
@@ -236,14 +243,17 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
 	}
 
 insert:
-	global_cache_flush();
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = TRUE;
+	}
+
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
 			mem->memory[i], mem->type),
 			intel_i810_private.registers+I810_PTE_BASE+(j*4));
-		readl(intel_i810_private.registers+I810_PTE_BASE+(j*4));	/* PCI Posting. */
 	}
+	readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4));	/* PCI Posting. */
 
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
@@ -254,12 +264,14 @@ static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
 {
 	int i;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		writel(agp_bridge->scratch_page, intel_i810_private.registers+I810_PTE_BASE+(i*4));
-		readl(intel_i810_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
 	}
+	readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4));
 
-	global_cache_flush();
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
@@ -370,6 +382,11 @@ static struct _intel_i830_private {
 	struct pci_dev *i830_dev;		/* device one */
 	volatile u8 __iomem *registers;
 	volatile u32 __iomem *gtt;		/* I915G */
+	/* gtt_entries is the number of gtt entries that are already mapped
+	 * to stolen memory. Stolen memory is larger than the memory mapped
+	 * through gtt_entries, as it includes some reserved space for the BIOS
+	 * popup and for the GTT.
+	 */
 	int gtt_entries;
 } intel_i830_private;
 
@@ -380,14 +397,41 @@ static void intel_i830_init_gtt_entries(void)
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	int size;
+	int size; /* reserved space (in kb) at the top of stolen memory */
 
 	pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
 
-	/* We obtain the size of the GTT, which is also stored (for some
-	 * reason) at the top of stolen memory. Then we add 4KB to that
-	 * for the video BIOS popup, which is also stored in there. */
-	size = agp_bridge->driver->fetch_size() + 4;
+	if (IS_I965) {
+		u32 pgetbl_ctl;
+
+		pci_read_config_dword(agp_bridge->dev, I810_PGETBL_CTL,
+				      &pgetbl_ctl);
+		/* The 965 has a field telling us the size of the GTT,
+		 * which may be larger than what is necessary to map the
+		 * aperture.
+		 */
+		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+		case I965_PGETBL_SIZE_128KB:
+			size = 128;
+			break;
+		case I965_PGETBL_SIZE_256KB:
+			size = 256;
+			break;
+		case I965_PGETBL_SIZE_512KB:
+			size = 512;
+			break;
+		default:
+			printk(KERN_INFO PFX "Unknown page table size, "
+			       "assuming 512KB\n");
+			size = 512;
+		}
+		size += 4; /* add in BIOS popup space */
+	} else {
+		/* On previous hardware, the GTT size was just what was
+		 * required to map the aperture.
+		 */
+		size = agp_bridge->driver->fetch_size() + 4;
+	}
 
 	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
 	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
@@ -576,6 +620,9 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
 	int i,j,num_entries;
 	void *temp;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
 
@@ -598,16 +645,18 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
 	    (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
 		return -EINVAL;
 
-	global_cache_flush();	/* FIXME: Necessary ?*/
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = TRUE;
+	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
 			mem->memory[i], mem->type),
 			intel_i830_private.registers+I810_PTE_BASE+(j*4));
-		readl(intel_i830_private.registers+I810_PTE_BASE+(j*4));	/* PCI Posting. */
 	}
+	readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4));
 
-	global_cache_flush();
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
@@ -617,7 +666,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
 {
 	int i;
 
-	global_cache_flush();
+	if (mem->page_count == 0)
+		return 0;
 
 	if (pg_start < intel_i830_private.gtt_entries) {
 		printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
@@ -626,10 +676,9 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
 
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		writel(agp_bridge->scratch_page, intel_i830_private.registers+I810_PTE_BASE+(i*4));
-		readl(intel_i830_private.registers+I810_PTE_BASE+(i*4));	/* PCI Posting. */
 	}
+	readl(intel_i830_private.registers+I810_PTE_BASE+((i-1)*4));
 
-	global_cache_flush();
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
@@ -686,6 +735,9 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
 	int i,j,num_entries;
 	void *temp;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	temp = agp_bridge->current_size;
 	num_entries = A_SIZE_FIX(temp)->num_entries;
 
@@ -708,15 +760,17 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
 	    (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
 		return -EINVAL;
 
-	global_cache_flush();
+	if (!mem->is_flushed) {
+		global_cache_flush();
+		mem->is_flushed = TRUE;
+	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
 		writel(agp_bridge->driver->mask_memory(agp_bridge,
 			mem->memory[i], mem->type), intel_i830_private.gtt+j);
-		readl(intel_i830_private.gtt+j);	/* PCI Posting. */
 	}
+	readl(intel_i830_private.gtt+j-1);
 
-	global_cache_flush();
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
@@ -726,7 +780,8 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
 {
 	int i;
 
-	global_cache_flush();
+	if (mem->page_count == 0)
+		return 0;
 
 	if (pg_start < intel_i830_private.gtt_entries) {
 		printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
@@ -735,30 +790,34 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
 
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		writel(agp_bridge->scratch_page, intel_i830_private.gtt+i);
-		readl(intel_i830_private.gtt+i);
 	}
+	readl(intel_i830_private.gtt+i-1);
 
-	global_cache_flush();
 	agp_bridge->driver->tlb_flush(mem);
 	return 0;
 }
 
-static int intel_i915_fetch_size(void)
+/* Return the aperture size by just checking the resource length. The effect
+ * described in the spec of the MSAC registers is just changing of the
+ * resource size.
+ */
+static int intel_i9xx_fetch_size(void)
 {
-	struct aper_size_info_fixed *values;
-	u32 temp, offset;
+	int num_sizes = sizeof(intel_i830_sizes) / sizeof(*intel_i830_sizes);
+	int aper_size; /* size in megabytes */
+	int i;
 
-#define I915_256MB_ADDRESS_MASK (1<<27)
+	aper_size = pci_resource_len(intel_i830_private.i830_dev, 2) / MB(1);
 
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
+	for (i = 0; i < num_sizes; i++) {
+		if (aper_size == intel_i830_sizes[i].size) {
+			agp_bridge->current_size = intel_i830_sizes + i;
+			agp_bridge->previous_size = agp_bridge->current_size;
+			return aper_size;
+		}
+	}
 
-	pci_read_config_dword(intel_i830_private.i830_dev, I915_GMADDR, &temp);
-	if (temp & I915_256MB_ADDRESS_MASK)
-		offset = 0;	/* 128MB aperture */
-	else
-		offset = 2;	/* 256MB aperture */
-	agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
-	return values[offset].size;
+	return 0;
 }
 
 /* The intel i915 automatically initializes the agp aperture during POST.
@@ -821,40 +880,9 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 	return addr | bridge->driver->masks[type].mask;
 }
 
-static int intel_i965_fetch_size(void)
-{
-	struct aper_size_info_fixed *values;
-	u32 offset = 0;
-	u8 temp;
-
-#define I965_512MB_ADDRESS_MASK (3<<1)
-
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	pci_read_config_byte(intel_i830_private.i830_dev, I965_MSAC, &temp);
-	temp &= I965_512MB_ADDRESS_MASK;
-	switch (temp) {
-	case 0x00:
-		offset = 0;	/* 128MB */
-		break;
-	case 0x06:
-		offset = 3;	/* 512MB */
-		break;
-	default:
-	case 0x02:
-		offset = 2;	/* 256MB */
-		break;
-	}
-
-	agp_bridge->previous_size = agp_bridge->current_size = (void *)(values + offset);
-
-	/* The i965 GTT is always sized as if it had a 512kB aperture size */
-	return 512;
-}
-
 /* The intel i965 automatically initializes the agp aperture during POST.
-+ * Use the memory already set aside for in the GTT.
-+ */
+ * Use the memory already set aside for in the GTT.
+ */
 static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 {
 	int page_order;
@@ -1574,7 +1602,7 @@ static struct agp_bridge_driver intel_915_driver = {
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= TRUE,
 	.configure		= intel_i915_configure,
-	.fetch_size		= intel_i915_fetch_size,
+	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.tlb_flush		= intel_i810_tlbflush,
 	.mask_memory		= intel_i810_mask_memory,
@@ -1598,7 +1626,7 @@ static struct agp_bridge_driver intel_i965_driver = {
 	.num_aperture_sizes	= 4,
 	.needs_scratch_page	= TRUE,
 	.configure		= intel_i915_configure,
-	.fetch_size		= intel_i965_fetch_size,
+	.fetch_size		= intel_i9xx_fetch_size,
 	.cleanup		= intel_i915_cleanup,
 	.tlb_flush		= intel_i810_tlbflush,
 	.mask_memory		= intel_i965_mask_memory,
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index d73be4c2db8a..902648db7efa 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -281,10 +281,11 @@ static int __devinit agp_sgi_init(void)
 	else
 		return 0;
 
-	sgi_tioca_agp_bridges =
-	    (struct agp_bridge_data **)kmalloc(tioca_gart_found *
-					       sizeof(struct agp_bridge_data *),
-					       GFP_KERNEL);
+	sgi_tioca_agp_bridges = kmalloc(tioca_gart_found *
+					sizeof(struct agp_bridge_data *),
+					GFP_KERNEL);
+	if (!sgi_tioca_agp_bridges)
+		return -ENOMEM;
 
 	j = 0;
 	list_for_each_entry(info, &tioca_list, ca_list) {
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index e2c4b3a41b1e..78c1ae28f17c 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -500,7 +500,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS)
 
 	if (!drm_get_drawable_info(dev, swap.drawable)) {
 		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-		DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
+		DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
 		return DRM_ERR(EINVAL);
 	}
 
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c
index 71e4e0f3fd54..556fd81fa815 100644
--- a/drivers/char/hw_random/amd-rng.c
+++ b/drivers/char/hw_random/amd-rng.c
@@ -144,7 +144,7 @@ static void __exit mod_exit(void)
 	hwrng_unregister(&amd_rng);
 }
 
-subsys_initcall(mod_init);
+module_init(mod_init);
 module_exit(mod_exit);
 
 MODULE_AUTHOR("The Linux Kernel team");
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c
index d37ced0d132b..8e8658dcd2e3 100644
--- a/drivers/char/hw_random/geode-rng.c
+++ b/drivers/char/hw_random/geode-rng.c
@@ -125,7 +125,7 @@ static void __exit mod_exit(void)
 	iounmap(mem);
 }
 
-subsys_initcall(mod_init);
+module_init(mod_init);
 module_exit(mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs");
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 8efbc9c0e545..f22e78e3c70f 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -143,6 +143,11 @@ static const struct pci_device_id pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, pci_tbl);
 
+static __initdata int no_fwh_detect;
+module_param(no_fwh_detect, int, 0);
+MODULE_PARM_DESC(no_fwh_detect, "Skip FWH detection:\n"
+		 " positive value - skip if FWH space locked read-only\n"
+		 " negative value - skip always");
 
 static inline u8 hwstatus_get(void __iomem *mem)
 {
@@ -240,6 +245,11 @@ static int __init mod_init(void)
 	if (!dev)
 		goto out; /* Device not found. */
 
+	if (no_fwh_detect < 0) {
+		pci_dev_put(dev);
+		goto fwh_done;
+	}
+
 	/* Check for Intel 82802 */
 	if (dev->device < 0x2640) {
 		fwh_dec_en1_off = FWH_DEC_EN1_REG_OLD;
@@ -252,6 +262,23 @@ static int __init mod_init(void)
 	pci_read_config_byte(dev, fwh_dec_en1_off, &fwh_dec_en1_val);
 	pci_read_config_byte(dev, bios_cntl_off, &bios_cntl_val);
 
+	if ((bios_cntl_val &
+	     (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK))
+	    == BIOS_CNTL_LOCK_ENABLE_MASK) {
+		static __initdata /*const*/ char warning[] =
+			KERN_WARNING PFX "Firmware space is locked read-only. If you can't or\n"
+			KERN_WARNING PFX "don't want to disable this in firmware setup, and if\n"
+			KERN_WARNING PFX "you are certain that your system has a functional\n"
+			KERN_WARNING PFX "RNG, try using the 'no_fwh_detect' option.\n";
+
+		pci_dev_put(dev);
+		if (no_fwh_detect)
+			goto fwh_done;
+		printk(warning);
+		err = -EBUSY;
+		goto out;
+	}
+
 	mem = ioremap_nocache(INTEL_FWH_ADDR, INTEL_FWH_ADDR_LEN);
 	if (mem == NULL) {
 		pci_dev_put(dev);
@@ -280,8 +307,7 @@ static int __init mod_init(void)
 		pci_write_config_byte(dev,
 				      fwh_dec_en1_off,
 				      fwh_dec_en1_val | FWH_F8_EN_MASK);
-	if (!(bios_cntl_val &
-	      (BIOS_CNTL_LOCK_ENABLE_MASK|BIOS_CNTL_WRITE_ENABLE_MASK)))
+	if (!(bios_cntl_val & BIOS_CNTL_WRITE_ENABLE_MASK))
 		pci_write_config_byte(dev,
 				      bios_cntl_off,
 				      bios_cntl_val | BIOS_CNTL_WRITE_ENABLE_MASK);
@@ -315,6 +341,8 @@ static int __init mod_init(void)
 		goto out;
 	}
 
+fwh_done:
+
 	err = -ENOMEM;
 	mem = ioremap(INTEL_RNG_ADDR, INTEL_RNG_ADDR_LEN);
 	if (!mem)
@@ -350,7 +378,7 @@ static void __exit mod_exit(void)
 	iounmap(mem);
 }
 
-subsys_initcall(mod_init);
+module_init(mod_init);
 module_exit(mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for Intel chipsets");
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index c9caff57db85..bab43ca32ac1 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -64,7 +64,7 @@ static void __exit ixp4xx_rng_exit(void)
 	iounmap(rng_base);
 }
 
-subsys_initcall(ixp4xx_rng_init);
+module_init(ixp4xx_rng_init);
 module_exit(ixp4xx_rng_exit);
 
 MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0e786b617bb8..9ebf84d18655 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -176,7 +176,7 @@ static void __exit mod_exit(void)
 	hwrng_unregister(&via_rng);
 }
 
-subsys_initcall(mod_init);
+module_init(mod_init);
 module_exit(mod_exit);
 
 MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
diff --git a/drivers/char/ip2/i2ellis.h b/drivers/char/ip2/i2ellis.h
index 5eabe47b0bc8..433305062fb8 100644
--- a/drivers/char/ip2/i2ellis.h
+++ b/drivers/char/ip2/i2ellis.h
@@ -606,9 +606,9 @@ static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int);
 // code and returning.
 //
 #define COMPLETE(pB,code) \
-	if(1){ \
+	do { \
 		 pB->i2eError = code; \
 		 return (code == I2EE_GOOD);\
-	}
+	} while (0)
 
 #endif   // I2ELLIS_H
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 4f1813e04754..f5c160caf9f4 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -293,8 +293,8 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
 	unsigned long pfn;
 
-	/* Turn a pfn offset into an absolute pfn */
-	pfn = PFN_DOWN(virt_to_phys((void *)PAGE_OFFSET)) + vma->vm_pgoff;
+	/* Turn a kernel-virtual address into a physical page frame */
+	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
 
 	/*
 	 * RED-PEN: on some architectures there is more mapped memory
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 3ece69231343..5c9f67f98d10 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/connector.h>
 #include <asm/atomic.h>
+#include <asm/unaligned.h>
 
 #include <linux/cn_proc.h>
 
@@ -60,7 +61,7 @@ void proc_fork_connector(struct task_struct *task)
 	ev = (struct proc_event*)msg->data;
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 	ev->what = PROC_EVENT_FORK;
 	ev->event_data.fork.parent_pid = task->real_parent->pid;
 	ev->event_data.fork.parent_tgid = task->real_parent->tgid;
@@ -88,7 +89,7 @@ void proc_exec_connector(struct task_struct *task)
 	ev = (struct proc_event*)msg->data;
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 	ev->what = PROC_EVENT_EXEC;
 	ev->event_data.exec.process_pid = task->pid;
 	ev->event_data.exec.process_tgid = task->tgid;
@@ -124,7 +125,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 		return;
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 
 	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
 	msg->ack = 0; /* not used */
@@ -146,7 +147,7 @@ void proc_exit_connector(struct task_struct *task)
 	ev = (struct proc_event*)msg->data;
 	get_seq(&msg->seq, &ev->cpu);
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 	ev->what = PROC_EVENT_EXIT;
 	ev->event_data.exit.process_pid = task->pid;
 	ev->event_data.exit.process_tgid = task->tgid;
@@ -181,7 +182,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 	ev = (struct proc_event*)msg->data;
 	msg->seq = rcvd_seq;
 	ktime_get_ts(&ts); /* get high res monotonic timestamp */
-	ev->timestamp_ns = timespec_to_ns(&ts);
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
 	ev->cpu = -1;
 	ev->what = PROC_EVENT_NONE;
 	ev->event_data.ack.err = err;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 6742b1adf2c8..91ad342a6051 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -285,6 +285,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
 	stat = cpufreq_stats_table[freq->cpu];
 	if (!stat)
 		return 0;
+
 	old_index = freq_table_get_index(stat, freq->old);
 	new_index = freq_table_get_index(stat, freq->new);
 
@@ -292,6 +293,9 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
 	if (old_index == new_index)
 		return 0;
 
+	if (old_index == -1 || new_index == -1)
+		return 0;
+
 	spin_lock(&cpufreq_stats_lock);
 	stat->last_index = new_index;
 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 96d4a0bb2203..ec796ad087df 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -6,13 +6,21 @@ menu "HID Devices"
 
 config HID
 	tristate "Generic HID support"
+	depends on INPUT
 	default y
 	---help---
-	  Say Y here if you want generic HID support to connect keyboards,
-	  mice, joysticks, graphic tablets, or any other HID based devices
-	  to your computer. You also need to select particular types of
-	  HID devices you want to compile support for, in the particular
-	  driver menu (USB, Bluetooth)
+	  A human interface device (HID) is a type of computer device that
+	  interacts directly with and takes input from humans. The term "HID"
+	  most commonly used to refer to the USB-HID specification, but other
+	  devices (such as, but not strictly limited to, Bluetooth) are
+	  designed using HID specification (this involves certain keyboards,
+	  mice, tablets, etc). This option compiles into kernel the generic
+	  HID layer code (parser, usages, etc.), which can then be used by
+	  transport-specific HID implementation (like USB or Bluetooth).
+
+	  For docs and specs, see http://www.usb.org/developers/hidpage/
+
+	  If unsure, say Y
 
 endmenu
 
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 18c2b3cf6bcc..b8cf50fcd64d 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -40,18 +40,10 @@
 
 #define DRIVER_VERSION "v2.6"
 #define DRIVER_AUTHOR "Andreas Gal, Vojtech Pavlik"
-#define DRIVER_DESC "USB HID core driver"
+#define DRIVER_DESC "HID core driver"
 #define DRIVER_LICENSE "GPL"
 
 /*
- * Module parameters.
- */
-
-static unsigned int hid_mousepoll_interval;
-module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644);
-MODULE_PARM_DESC(mousepoll, "Polling interval of mice");
-
-/*
  * Register a new report for a device.
  */
 
@@ -656,7 +648,7 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
656 for (i = 0; i < HID_REPORT_TYPES; i++) 648 for (i = 0; i < HID_REPORT_TYPES; i++)
657 INIT_LIST_HEAD(&device->report_enum[i].report_list); 649 INIT_LIST_HEAD(&device->report_enum[i].report_list);
658 650
659 if (!(device->rdesc = (__u8 *)kmalloc(size, GFP_KERNEL))) { 651 if (!(device->rdesc = kmalloc(size, GFP_KERNEL))) {
660 kfree(device->collection); 652 kfree(device->collection);
661 kfree(device); 653 kfree(device);
662 return NULL; 654 return NULL;
@@ -888,6 +880,10 @@ static void hid_output_field(struct hid_field *field, __u8 *data)
888 unsigned size = field->report_size; 880 unsigned size = field->report_size;
889 unsigned n; 881 unsigned n;
890 882
883 /* make sure the unused bits in the last byte are zeros */
884 if (count > 0 && size > 0)
885 data[(count*size-1)/8] = 0;
886
891 for (n = 0; n < count; n++) { 887 for (n = 0; n < count; n++) {
892 if (field->logical_minimum < 0) /* signed values */ 888 if (field->logical_minimum < 0) /* signed values */
893 implement(data, offset + n * size, size, s32ton(field->value[n], size)); 889 implement(data, offset + n * size, size, s32ton(field->value[n], size));
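
The new zeroing targets the last byte the packing loop touches: count values of size bits end at bit count*size-1, so (count*size-1)/8 is the index of the final, possibly partially filled, byte. A quick standalone check of that arithmetic (values illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned count = 5, size = 3;    /* 15 bits: bytes 0 and 1 used */

        printf("last byte index = %u\n", (count * size - 1) / 8);    /* 1 */
        return 0;
    }
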
@@ -955,7 +951,7 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
955 } 951 }
956 952
957#ifdef DEBUG_DATA 953#ifdef DEBUG_DATA
958 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", len, report_enum->numbered ? "" : "un"); 954 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
959#endif 955#endif
960 956
961 n = 0; /* Normally report number is 0 */ 957 n = 0; /* Normally report number is 0 */
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 998638020ea0..9cf591a1bda3 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -30,7 +30,6 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/usb/input.h>
34 33
35#undef DEBUG 34#undef DEBUG
36 35
@@ -68,6 +67,7 @@ static const struct {
68#define map_led(c) do { usage->code = c; usage->type = EV_LED; bit = input->ledbit; max = LED_MAX; } while (0) 67#define map_led(c) do { usage->code = c; usage->type = EV_LED; bit = input->ledbit; max = LED_MAX; } while (0)
69 68
70#define map_abs_clear(c) do { map_abs(c); clear_bit(c, bit); } while (0) 69#define map_abs_clear(c) do { map_abs(c); clear_bit(c, bit); } while (0)
70#define map_rel_clear(c) do { map_rel(c); clear_bit(c, bit); } while (0)
71#define map_key_clear(c) do { map_key(c); clear_bit(c, bit); } while (0) 71#define map_key_clear(c) do { map_key(c); clear_bit(c, bit); } while (0)
72 72
73#ifdef CONFIG_USB_HIDINPUT_POWERBOOK 73#ifdef CONFIG_USB_HIDINPUT_POWERBOOK
@@ -292,7 +292,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
292 } 292 }
293 } 293 }
294 294
295 map_key(code); 295 map_key_clear(code);
296 break; 296 break;
297 297
298 298
@@ -343,9 +343,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
343 case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ: 343 case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
344 case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL: 344 case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
345 if (field->flags & HID_MAIN_ITEM_RELATIVE) 345 if (field->flags & HID_MAIN_ITEM_RELATIVE)
346 map_rel(usage->hid & 0xf); 346 map_rel_clear(usage->hid & 0xf);
347 else 347 else
348 map_abs(usage->hid & 0xf); 348 map_abs_clear(usage->hid & 0xf);
349 break; 349 break;
350 350
351 case HID_GD_HATSWITCH: 351 case HID_GD_HATSWITCH:
@@ -363,9 +363,22 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
363 break; 363 break;
364 364
365 case HID_UP_LED: 365 case HID_UP_LED:
366 if (((usage->hid - 1) & 0xffff) >= LED_MAX) 366
367 goto ignore; 367 switch (usage->hid & 0xffff) { /* HID-Value: */
368 map_led((usage->hid - 1) & 0xffff); 368 case 0x01: map_led (LED_NUML); break; /* "Num Lock" */
369 case 0x02: map_led (LED_CAPSL); break; /* "Caps Lock" */
370 case 0x03: map_led (LED_SCROLLL); break; /* "Scroll Lock" */
371 case 0x04: map_led (LED_COMPOSE); break; /* "Compose" */
372 case 0x05: map_led (LED_KANA); break; /* "Kana" */
373 case 0x27: map_led (LED_SLEEP); break; /* "Stand-By" */
374 case 0x4c: map_led (LED_SUSPEND); break; /* "System Suspend" */
375 case 0x09: map_led (LED_MUTE); break; /* "Mute" */
376 case 0x4b: map_led (LED_MISC); break; /* "Generic Indicator" */
377 case 0x19: map_led (LED_MAIL); break; /* "Message Waiting" */
378 case 0x4d: map_led (LED_CHARGING); break; /* "External Power Connected" */
379
380 default: goto ignore;
381 }
369 break; 382 break;
370 383
371 case HID_UP_DIGITIZER: 384 case HID_UP_DIGITIZER:
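
The new switch keys on the low 16 bits of usage->hid, which the HID core packs as (usage page << 16) | usage id; HID_UP_LED is page 0x08. A standalone illustration of the decode (the packing is assumed from hid.h):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hid = (0x08 << 16) | 0x4b;    /* LED page, "Generic Indicator" */

        printf("usage id = 0x%02x\n", hid & 0xffff);    /* 0x4b -> LED_MISC */
        return 0;
    }
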
@@ -500,7 +513,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
500 case 0x22f: map_key_clear(KEY_ZOOMRESET); break; 513 case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
501 case 0x233: map_key_clear(KEY_SCROLLUP); break; 514 case 0x233: map_key_clear(KEY_SCROLLUP); break;
502 case 0x234: map_key_clear(KEY_SCROLLDOWN); break; 515 case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
503 case 0x238: map_rel(REL_HWHEEL); break; 516 case 0x238: map_rel_clear(REL_HWHEEL); break;
504 case 0x25f: map_key_clear(KEY_CANCEL); break; 517 case 0x25f: map_key_clear(KEY_CANCEL); break;
505 case 0x279: map_key_clear(KEY_REDO); break; 518 case 0x279: map_key_clear(KEY_REDO); break;
506 519
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 31c42002708f..b80f6ed5acfc 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -93,7 +93,7 @@ int vid_from_reg(int val, u8 vrm)
93 case 110: /* Intel Conroe */ 93 case 110: /* Intel Conroe */
94 /* compute in uV, round to mV */ 94 /* compute in uV, round to mV */
95 val &= 0xff; 95 val &= 0xff;
96 if(((val & 0x7e) == 0xfe) || (!(val & 0x7e))) 96 if (val < 0x02 || val > 0xb2)
97 return 0; 97 return 0;
98 return((1600000 - (val - 2) * 6250 + 500) / 1000); 98 return((1600000 - (val - 2) * 6250 + 500) / 1000);
99 case 24: /* Opteron processor */ 99 case 24: /* Opteron processor */
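
The replacement bounds match the linear table the formula below them encodes: VID codes 0x02..0xb2 run from 1.6 V down to 0.5 V in 6.25 mV steps, and anything outside that window is treated as "no CPU". Checking the endpoints with the same expression (standalone):

    #include <stdio.h>

    int main(void)
    {
        int val;

        for (val = 0x02; val <= 0xb2; val += 0xb0)    /* endpoints only */
            printf("0x%02x -> %d mV\n", val,
                   (1600000 - (val - 2) * 6250 + 500) / 1000);
        /* prints 1600 mV and 500 mV */
        return 0;
    }
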
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index c12ac5abc2bb..253ffaf1568a 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -117,6 +117,7 @@ static const u16 W83793_REG_IN[][3] = {
117/* Low Bits of Vcore A/B Vtt Read/High/Low */ 117/* Low Bits of Vcore A/B Vtt Read/High/Low */
118static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 }; 118static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 };
119static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 }; 119static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 };
120static u8 scale_in_add[] = { 0, 0, 0, 0, 0, 0, 0, 150, 150, 0 };
120 121
121#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */ 122#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */
122#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */ 123#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */
@@ -203,6 +204,8 @@ struct w83793_data {
203 u8 temp_fan_map[6]; /* Temp controls which pwm fan, bit field */ 204 u8 temp_fan_map[6]; /* Temp controls which pwm fan, bit field */
204 205
205 u8 has_pwm; 206 u8 has_pwm;
207 u8 has_temp;
208 u8 has_vid;
206 u8 pwm_enable; /* Register value, each Temp has 1 bit */ 209 u8 pwm_enable; /* Register value, each Temp has 1 bit */
207 u8 pwm_uptime; /* Register value */ 210 u8 pwm_uptime; /* Register value */
208 u8 pwm_downtime; /* Register value */ 211 u8 pwm_downtime; /* Register value */
@@ -500,7 +503,7 @@ store_temp(struct device *dev, struct device_attribute *attr,
500 each has 4 mode:(2 bits) 503 each has 4 mode:(2 bits)
501 0: Stop monitor 504 0: Stop monitor
502 1: Use internal temp sensor(default) 505 1: Use internal temp sensor(default)
503 2: Use sensor in AMD CPU and get result by AMDSI 506 2: Reserved
504 3: Use sensor in Intel CPU and get result by PECI 507 3: Use sensor in Intel CPU and get result by PECI
505 508
506 TR1-TR2 509 TR1-TR2
@@ -509,8 +512,8 @@ store_temp(struct device *dev, struct device_attribute *attr,
509 1: To enable temp sensors monitor 512 1: To enable temp sensors monitor
510*/ 513*/
511 514
512/* 0 disable, 5 AMDSI, 6 PECI */ 515/* 0 disable, 6 PECI */
513static u8 TO_TEMP_MODE[] = { 0, 0, 5, 6 }; 516static u8 TO_TEMP_MODE[] = { 0, 0, 0, 6 };
514 517
515static ssize_t 518static ssize_t
516show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf) 519show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
@@ -550,11 +553,10 @@ store_temp_mode(struct device *dev, struct device_attribute *attr,
550 u8 val = simple_strtoul(buf, NULL, 10); 553 u8 val = simple_strtoul(buf, NULL, 10);
551 554
552 /* transform the sysfs interface values into table above */ 555 /* transform the sysfs interface values into table above */
553 if ((val == 5 || val == 6) && (index < 4)) { 556 if ((val == 6) && (index < 4)) {
554 val -= 3; 557 val -= 3;
555 } else if ((val == 3 && index < 4) 558 } else if ((val == 3 && index < 4)
556 || (val == 4 && index >= 4) 559 || (val == 4 && index >= 4)) {
557 || val == 0) {
558 /* transform diode or thermistor into internal enable */ 560 /* transform diode or thermistor into internal enable */
559 val = !!val; 561 val = !!val;
560 } else { 562 } else {
@@ -839,7 +841,9 @@ show_in(struct device *dev, struct device_attribute *attr, char *buf)
839 val <<= 2; 841 val <<= 2;
840 val += (data->in_low_bits[nr] >> (index * 2)) & 0x3; 842 val += (data->in_low_bits[nr] >> (index * 2)) & 0x3;
841 } 843 }
842 return sprintf(buf, "%d\n", val * scale_in[index]); 844 /* voltage inputs 5VDD and 5VSB need a 150mV offset */
845 val = val * scale_in[index] + scale_in_add[index];
846 return sprintf(buf, "%d\n", val);
843} 847}
844 848
845static ssize_t 849static ssize_t
@@ -859,6 +863,10 @@ store_in(struct device *dev, struct device_attribute *attr,
859 scale_in[index] / 2) / scale_in[index]; 863 scale_in[index] / 2) / scale_in[index];
860 mutex_lock(&data->update_lock); 864 mutex_lock(&data->update_lock);
861 if (index > 2) { 865 if (index > 2) {
866 /* strip the 150mV offset so the 5VDD and 5VSB limits match the ALARM mechanism */
867 if (1 == nr || 2 == nr) {
868 val -= scale_in_add[index] / scale_in[index];
869 }
862 val = SENSORS_LIMIT(val, 0, 255); 870 val = SENSORS_LIMIT(val, 0, 255);
863 } else { 871 } else {
864 val = SENSORS_LIMIT(val, 0, 0x3FF); 872 val = SENSORS_LIMIT(val, 0, 0x3FF);
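
The read and write paths stay symmetric: show_in() adds the 150 mV offset after scaling, and store_in() removes it again in register units (150/24 = 6) so the stored limit compares correctly against raw readings. A standalone round-trip with the 5VDD scale factor from the tables above:

    #include <stdio.h>

    int main(void)
    {
        int raw = 200, scale = 24, add = 150;
        int mv = raw * scale + add;                      /* read path: 4950 mV */
        int back = (mv + scale / 2) / scale - add / scale;    /* write path */

        printf("%d mV -> raw %d\n", mv, back);           /* raw 200 again */
        return 0;
    }
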
@@ -979,12 +987,6 @@ static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
979 SENSOR_ATTR_IN(7), 987 SENSOR_ATTR_IN(7),
980 SENSOR_ATTR_IN(8), 988 SENSOR_ATTR_IN(8),
981 SENSOR_ATTR_IN(9), 989 SENSOR_ATTR_IN(9),
982 SENSOR_ATTR_TEMP(1),
983 SENSOR_ATTR_TEMP(2),
984 SENSOR_ATTR_TEMP(3),
985 SENSOR_ATTR_TEMP(4),
986 SENSOR_ATTR_TEMP(5),
987 SENSOR_ATTR_TEMP(6),
988 SENSOR_ATTR_FAN(1), 990 SENSOR_ATTR_FAN(1),
989 SENSOR_ATTR_FAN(2), 991 SENSOR_ATTR_FAN(2),
990 SENSOR_ATTR_FAN(3), 992 SENSOR_ATTR_FAN(3),
@@ -995,6 +997,15 @@ static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
995 SENSOR_ATTR_PWM(3), 997 SENSOR_ATTR_PWM(3),
996}; 998};
997 999
1000static struct sensor_device_attribute_2 w83793_temp[] = {
1001 SENSOR_ATTR_TEMP(1),
1002 SENSOR_ATTR_TEMP(2),
1003 SENSOR_ATTR_TEMP(3),
1004 SENSOR_ATTR_TEMP(4),
1005 SENSOR_ATTR_TEMP(5),
1006 SENSOR_ATTR_TEMP(6),
1007};
1008
998/* Fan6-Fan12 */ 1009/* Fan6-Fan12 */
999static struct sensor_device_attribute_2 w83793_left_fan[] = { 1010static struct sensor_device_attribute_2 w83793_left_fan[] = {
1000 SENSOR_ATTR_FAN(6), 1011 SENSOR_ATTR_FAN(6),
@@ -1015,9 +1026,12 @@ static struct sensor_device_attribute_2 w83793_left_pwm[] = {
1015 SENSOR_ATTR_PWM(8), 1026 SENSOR_ATTR_PWM(8),
1016}; 1027};
1017 1028
1018static struct sensor_device_attribute_2 sda_single_files[] = { 1029static struct sensor_device_attribute_2 w83793_vid[] = {
1019 SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0), 1030 SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
1020 SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1), 1031 SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
1032};
1033
1034static struct sensor_device_attribute_2 sda_single_files[] = {
1021 SENSOR_ATTR_2(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm, 1035 SENSOR_ATTR_2(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm,
1022 NOT_USED, NOT_USED), 1036 NOT_USED, NOT_USED),
1023 SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep, 1037 SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
@@ -1070,11 +1084,17 @@ static int w83793_detach_client(struct i2c_client *client)
1070 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) 1084 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
1071 device_remove_file(dev, &sda_single_files[i].dev_attr); 1085 device_remove_file(dev, &sda_single_files[i].dev_attr);
1072 1086
1087 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
1088 device_remove_file(dev, &w83793_vid[i].dev_attr);
1089
1073 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++) 1090 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1074 device_remove_file(dev, &w83793_left_fan[i].dev_attr); 1091 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
1075 1092
1076 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++) 1093 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
1077 device_remove_file(dev, &w83793_left_pwm[i].dev_attr); 1094 device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
1095
1096 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
1097 device_remove_file(dev, &w83793_temp[i].dev_attr);
1078 } 1098 }
1079 1099
1080 if ((err = i2c_detach_client(client))) 1100 if ((err = i2c_detach_client(client)))
@@ -1187,6 +1207,7 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1187 struct w83793_data *data; 1207 struct w83793_data *data;
1188 int files_fan = ARRAY_SIZE(w83793_left_fan) / 7; 1208 int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
1189 int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5; 1209 int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
1210 int files_temp = ARRAY_SIZE(w83793_temp) / 6;
1190 int err = 0; 1211 int err = 0;
1191 1212
1192 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { 1213 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1313,6 +1334,44 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1313 data->has_pwm |= 0x80; 1334 data->has_pwm |= 0x80;
1314 } 1335 }
1315 1336
1337 tmp = w83793_read_value(client, W83793_REG_FANIN_SEL);
1338 if ((tmp & 0x01) && (val & 0x08)) { /* fan 9, second location */
1339 data->has_fan |= 0x100;
1340 }
1341 if ((tmp & 0x02) && (val & 0x10)) { /* fan 10, second location */
1342 data->has_fan |= 0x200;
1343 }
1344 if ((tmp & 0x04) && (val & 0x20)) { /* fan 11, second location */
1345 data->has_fan |= 0x400;
1346 }
1347 if ((tmp & 0x08) && (val & 0x40)) { /* fan 12, second location */
1348 data->has_fan |= 0x800;
1349 }
1350
1351 /* check the temp1-6 modes, ignore formerly AMDSI-selected inputs */
 1352 tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[0]);
1353 if (tmp & 0x01)
1354 data->has_temp |= 0x01;
1355 if (tmp & 0x04)
1356 data->has_temp |= 0x02;
1357 if (tmp & 0x10)
1358 data->has_temp |= 0x04;
1359 if (tmp & 0x40)
1360 data->has_temp |= 0x08;
1361
1362 tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[1]);
1363 if (tmp & 0x01)
1364 data->has_temp |= 0x10;
1365 if (tmp & 0x02)
1366 data->has_temp |= 0x20;
1367
1368 /* Detect the VID usage and ignore unused input */
1369 tmp = w83793_read_value(client, W83793_REG_MFC);
1370 if (!(tmp & 0x29))
1371 data->has_vid |= 0x1; /* has VIDA */
1372 if (tmp & 0x80)
1373 data->has_vid |= 0x2; /* has VIDB */
1374
1316 /* Register sysfs hooks */ 1375 /* Register sysfs hooks */
1317 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) { 1376 for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
1318 err = device_create_file(dev, 1377 err = device_create_file(dev,
@@ -1321,6 +1380,14 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1321 goto exit_remove; 1380 goto exit_remove;
1322 } 1381 }
1323 1382
1383 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) {
1384 if (!(data->has_vid & (1 << i)))
1385 continue;
1386 err = device_create_file(dev, &w83793_vid[i].dev_attr);
1387 if (err)
1388 goto exit_remove;
1389 }
1390
1324 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { 1391 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
1325 err = device_create_file(dev, &sda_single_files[i].dev_attr); 1392 err = device_create_file(dev, &sda_single_files[i].dev_attr);
1326 if (err) 1393 if (err)
@@ -1328,6 +1395,19 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
1328 1395
1329 } 1396 }
1330 1397
1398 for (i = 0; i < 6; i++) {
1399 int j;
1400 if (!(data->has_temp & (1 << i)))
1401 continue;
1402 for (j = 0; j < files_temp; j++) {
1403 err = device_create_file(dev,
1404 &w83793_temp[(i) * files_temp
1405 + j].dev_attr);
1406 if (err)
1407 goto exit_remove;
1408 }
1409 }
1410
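
As with the fan and pwm loops around it, files_temp recovers the per-channel file count from the flat attribute array, so channel i's files occupy indices i*files_temp through i*files_temp + files_temp - 1. Standalone arithmetic (36 is illustrative; the real total is ARRAY_SIZE(w83793_temp)):

    #include <stdio.h>

    int main(void)
    {
        int total = 36, channels = 6;
        int files_temp = total / channels, i = 2;

        printf("temp%d files start at index %d\n", i + 1, i * files_temp);
        return 0;
    }
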
1331 for (i = 5; i < 12; i++) { 1411 for (i = 5; i < 12; i++) {
1332 int j; 1412 int j;
1333 if (!(data->has_fan & (1 << i))) 1413 if (!(data->has_fan & (1 << i)))
@@ -1371,12 +1451,18 @@ exit_remove:
1371 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) 1451 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
1372 device_remove_file(dev, &sda_single_files[i].dev_attr); 1452 device_remove_file(dev, &sda_single_files[i].dev_attr);
1373 1453
1454 for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
1455 device_remove_file(dev, &w83793_vid[i].dev_attr);
1456
1374 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++) 1457 for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
1375 device_remove_file(dev, &w83793_left_fan[i].dev_attr); 1458 device_remove_file(dev, &w83793_left_fan[i].dev_attr);
1376 1459
1377 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++) 1460 for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
1378 device_remove_file(dev, &w83793_left_pwm[i].dev_attr); 1461 device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
1379 1462
1463 for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
1464 device_remove_file(dev, &w83793_temp[i].dev_attr);
1465
1380 if (data->lm75[0] != NULL) { 1466 if (data->lm75[0] != NULL) {
1381 i2c_detach_client(data->lm75[0]); 1467 i2c_detach_client(data->lm75[0]);
1382 kfree(data->lm75[0]); 1468 kfree(data->lm75[0]);
@@ -1428,6 +1514,8 @@ static void w83793_update_nonvolatile(struct device *dev)
1428 } 1514 }
1429 1515
1430 for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) { 1516 for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) {
1517 if (!(data->has_temp & (1 << i)))
1518 continue;
1431 data->temp_fan_map[i] = 1519 data->temp_fan_map[i] =
1432 w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i)); 1520 w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i));
1433 for (j = 1; j < 5; j++) { 1521 for (j = 1; j < 5; j++) {
@@ -1510,9 +1598,12 @@ static struct w83793_data *w83793_update_device(struct device *dev)
1510 w83793_read_value(client, W83793_REG_FAN(i) + 1); 1598 w83793_read_value(client, W83793_REG_FAN(i) + 1);
1511 } 1599 }
1512 1600
1513 for (i = 0; i < ARRAY_SIZE(data->temp); i++) 1601 for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
1602 if (!(data->has_temp & (1 << i)))
1603 continue;
1514 data->temp[i][TEMP_READ] = 1604 data->temp[i][TEMP_READ] =
1515 w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]); 1605 w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]);
1606 }
1516 1607
1517 data->temp_low_bits = 1608 data->temp_low_bits =
1518 w83793_read_value(client, W83793_REG_TEMP_LOW_BITS); 1609 w83793_read_value(client, W83793_REG_TEMP_LOW_BITS);
@@ -1527,8 +1618,10 @@ static struct w83793_data *w83793_update_device(struct device *dev)
1527 for (i = 0; i < ARRAY_SIZE(data->alarms); i++) 1618 for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
1528 data->alarms[i] = 1619 data->alarms[i] =
1529 w83793_read_value(client, W83793_REG_ALARM(i)); 1620 w83793_read_value(client, W83793_REG_ALARM(i));
1530 data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA); 1621 if (data->has_vid & 0x01)
1531 data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB); 1622 data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA);
1623 if (data->has_vid & 0x02)
1624 data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
1532 w83793_update_nonvolatile(dev); 1625 w83793_update_nonvolatile(dev);
1533 data->last_updated = jiffies; 1626 data->last_updated = jiffies;
1534 data->valid = 1; 1627 data->valid = 1;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e1989f3a2684..9367c4cfe936 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -564,13 +564,4 @@ config I2C_PNX
564 This driver can also be built as a module. If so, the module 564 This driver can also be built as a module. If so, the module
565 will be called i2c-pnx. 565 will be called i2c-pnx.
566 566
567config I2C_PNX_EARLY
568 bool "Early initialization for I2C on PNXxxxx"
569 depends on I2C_PNX=y
570 help
571 Under certain circumstances one may need to make sure I2C on PNXxxxx
572 is initialized earlier than some other driver that depends on it
573 (for instance, that might be USB in case of PNX4008). With this
574 option turned on you can guarantee that.
575
576endmenu 567endmenu
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index bbc8e3a7ff55..490173611d6b 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -529,6 +529,8 @@ mv64xxx_i2c_probe(struct platform_device *pd)
529 platform_set_drvdata(pd, drv_data); 529 platform_set_drvdata(pd, drv_data);
530 i2c_set_adapdata(&drv_data->adapter, drv_data); 530 i2c_set_adapdata(&drv_data->adapter, drv_data);
531 531
532 mv64xxx_i2c_hw_init(drv_data);
533
532 if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0, 534 if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
533 MV64XXX_I2C_CTLR_NAME, drv_data)) { 535 MV64XXX_I2C_CTLR_NAME, drv_data)) {
534 dev_err(&drv_data->adapter.dev, 536 dev_err(&drv_data->adapter.dev,
@@ -542,8 +544,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
542 goto exit_free_irq; 544 goto exit_free_irq;
543 } 545 }
544 546
545 mv64xxx_i2c_hw_init(drv_data);
546
547 return 0; 547 return 0;
548 548
549 exit_free_irq: 549 exit_free_irq:
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index de0bca77e926..17376feb1acc 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -305,8 +305,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
305 return 0; 305 return 0;
306} 306}
307 307
308static irqreturn_t 308static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
309i2c_pnx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
310{ 309{
311 u32 stat, ctl; 310 u32 stat, ctl;
312 struct i2c_adapter *adap = dev_id; 311 struct i2c_adapter *adap = dev_id;
@@ -699,10 +698,6 @@ MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev <source@mvista.com>");
699MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); 698MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses");
700MODULE_LICENSE("GPL"); 699MODULE_LICENSE("GPL");
701 700
702#ifdef CONFIG_I2C_PNX_EARLY
703/* We need to make sure I2C is initialized before USB */ 701/* We need to make sure I2C is initialized before USB */
704subsys_initcall(i2c_adap_pnx_init); 702subsys_initcall(i2c_adap_pnx_init);
705#else
706mudule_init(i2c_adap_pnx_init);
707#endif
708module_exit(i2c_adap_pnx_exit); 703module_exit(i2c_adap_pnx_exit);
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c
index 420377c86422..3fcb646e2073 100644
--- a/drivers/i2c/chips/m41t00.c
+++ b/drivers/i2c/chips/m41t00.c
@@ -209,6 +209,7 @@ m41t00_set(void *arg)
209 buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f); 209 buf[m41t00_chip->hour] = (buf[m41t00_chip->hour] & ~0x3f) | (hour& 0x3f);
210 buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f); 210 buf[m41t00_chip->day] = (buf[m41t00_chip->day] & ~0x3f) | (day & 0x3f);
211 buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f); 211 buf[m41t00_chip->mon] = (buf[m41t00_chip->mon] & ~0x1f) | (mon & 0x1f);
212 buf[m41t00_chip->year] = year;
212 213
213 if (i2c_master_send(save_client, wbuf, 9) < 0) 214 if (i2c_master_send(save_client, wbuf, 9) < 0)
214 dev_err(&save_client->dev, "m41t00_set: Write error\n"); 215 dev_err(&save_client->dev, "m41t00_set: Write error\n");
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 3e31f1d265c9..b05378a3d673 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -95,16 +95,32 @@ struct device_driver i2c_adapter_driver = {
95 .bus = &i2c_bus_type, 95 .bus = &i2c_bus_type,
96}; 96};
97 97
98/* ------------------------------------------------------------------------- */
99
100/* I2C bus adapters -- one roots each I2C or SMBUS segment */
101
98static void i2c_adapter_class_dev_release(struct class_device *dev) 102static void i2c_adapter_class_dev_release(struct class_device *dev)
99{ 103{
100 struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev); 104 struct i2c_adapter *adap = class_dev_to_i2c_adapter(dev);
101 complete(&adap->class_dev_released); 105 complete(&adap->class_dev_released);
102} 106}
103 107
108static ssize_t i2c_adapter_show_name(struct class_device *cdev, char *buf)
109{
110 struct i2c_adapter *adap = class_dev_to_i2c_adapter(cdev);
111 return sprintf(buf, "%s\n", adap->name);
112}
113
114static struct class_device_attribute i2c_adapter_attrs[] = {
115 __ATTR(name, S_IRUGO, i2c_adapter_show_name, NULL),
116 { },
117};
118
104struct class i2c_adapter_class = { 119struct class i2c_adapter_class = {
105 .owner = THIS_MODULE, 120 .owner = THIS_MODULE,
106 .name = "i2c-adapter", 121 .name = "i2c-adapter",
107 .release = &i2c_adapter_class_dev_release, 122 .class_dev_attrs = i2c_adapter_attrs,
123 .release = &i2c_adapter_class_dev_release,
108}; 124};
109 125
110static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf) 126static ssize_t show_adapter_name(struct device *dev, struct device_attribute *attr, char *buf)
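
The new class attribute lets userspace identify a bus without opening the i2c-dev node; once registered, the name appears under the i2c-adapter class. A minimal reader (the sysfs path follows the usual class layout and may differ per system):

    #include <stdio.h>

    int main(void)
    {
        char name[64];
        FILE *f = fopen("/sys/class/i2c-adapter/i2c-0/name", "r");

        if (!f)
            return 1;
        if (fgets(name, sizeof(name), f))
            printf("adapter 0: %s", name);
        fclose(f);
        return 0;
    }
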
@@ -175,8 +191,12 @@ int i2c_add_adapter(struct i2c_adapter *adap)
175 * If the parent pointer is not set up, 191 * If the parent pointer is not set up,
176 * we add this adapter to the host bus. 192 * we add this adapter to the host bus.
177 */ 193 */
178 if (adap->dev.parent == NULL) 194 if (adap->dev.parent == NULL) {
179 adap->dev.parent = &platform_bus; 195 adap->dev.parent = &platform_bus;
196 printk(KERN_WARNING "**WARNING** I2C adapter driver [%s] "
197 "forgot to specify physical device; fix it!\n",
198 adap->name);
199 }
180 sprintf(adap->dev.bus_id, "i2c-%d", adap->nr); 200 sprintf(adap->dev.bus_id, "i2c-%d", adap->nr);
181 adap->dev.driver = &i2c_adapter_driver; 201 adap->dev.driver = &i2c_adapter_driver;
182 adap->dev.release = &i2c_adapter_dev_release; 202 adap->dev.release = &i2c_adapter_dev_release;
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index ffdffb6379ef..524e65de4398 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -46,6 +46,8 @@ static atiixp_ide_timing mdma_timing[] = {
46 46
47static int save_mdma_mode[4]; 47static int save_mdma_mode[4];
48 48
49static DEFINE_SPINLOCK(atiixp_lock);
50
49/** 51/**
50 * atiixp_ratemask - compute rate mask for ATIIXP IDE 52 * atiixp_ratemask - compute rate mask for ATIIXP IDE
51 * @drive: IDE drive to compute for 53 * @drive: IDE drive to compute for
@@ -105,7 +107,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive)
105 unsigned long flags; 107 unsigned long flags;
106 u16 tmp16; 108 u16 tmp16;
107 109
108 spin_lock_irqsave(&ide_lock, flags); 110 spin_lock_irqsave(&atiixp_lock, flags);
109 111
110 pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); 112 pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
111 if (save_mdma_mode[drive->dn]) 113 if (save_mdma_mode[drive->dn])
@@ -114,7 +116,7 @@ static int atiixp_ide_dma_host_on(ide_drive_t *drive)
114 tmp16 |= (1 << drive->dn); 116 tmp16 |= (1 << drive->dn);
115 pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); 117 pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
116 118
117 spin_unlock_irqrestore(&ide_lock, flags); 119 spin_unlock_irqrestore(&atiixp_lock, flags);
118 120
119 return __ide_dma_host_on(drive); 121 return __ide_dma_host_on(drive);
120} 122}
@@ -125,13 +127,13 @@ static int atiixp_ide_dma_host_off(ide_drive_t *drive)
125 unsigned long flags; 127 unsigned long flags;
126 u16 tmp16; 128 u16 tmp16;
127 129
128 spin_lock_irqsave(&ide_lock, flags); 130 spin_lock_irqsave(&atiixp_lock, flags);
129 131
130 pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16); 132 pci_read_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
131 tmp16 &= ~(1 << drive->dn); 133 tmp16 &= ~(1 << drive->dn);
132 pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16); 134 pci_write_config_word(dev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
133 135
134 spin_unlock_irqrestore(&ide_lock, flags); 136 spin_unlock_irqrestore(&atiixp_lock, flags);
135 137
136 return __ide_dma_host_off(drive); 138 return __ide_dma_host_off(drive);
137} 139}
@@ -152,7 +154,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
152 u32 pio_timing_data; 154 u32 pio_timing_data;
153 u16 pio_mode_data; 155 u16 pio_mode_data;
154 156
155 spin_lock_irqsave(&ide_lock, flags); 157 spin_lock_irqsave(&atiixp_lock, flags);
156 158
157 pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data); 159 pci_read_config_word(dev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
158 pio_mode_data &= ~(0x07 << (drive->dn * 4)); 160 pio_mode_data &= ~(0x07 << (drive->dn * 4));
@@ -165,7 +167,7 @@ static void atiixp_tuneproc(ide_drive_t *drive, u8 pio)
165 (pio_timing[pio].command_width << (timing_shift + 4)); 167 (pio_timing[pio].command_width << (timing_shift + 4));
166 pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data); 168 pci_write_config_dword(dev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
167 169
168 spin_unlock_irqrestore(&ide_lock, flags); 170 spin_unlock_irqrestore(&atiixp_lock, flags);
169} 171}
170 172
171/** 173/**
@@ -189,7 +191,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
189 191
190 speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed); 192 speed = ide_rate_filter(atiixp_ratemask(drive), xferspeed);
191 193
192 spin_lock_irqsave(&ide_lock, flags); 194 spin_lock_irqsave(&atiixp_lock, flags);
193 195
194 save_mdma_mode[drive->dn] = 0; 196 save_mdma_mode[drive->dn] = 0;
195 if (speed >= XFER_UDMA_0) { 197 if (speed >= XFER_UDMA_0) {
@@ -208,7 +210,7 @@ static int atiixp_speedproc(ide_drive_t *drive, u8 xferspeed)
208 } 210 }
209 } 211 }
210 212
211 spin_unlock_irqrestore(&ide_lock, flags); 213 spin_unlock_irqrestore(&atiixp_lock, flags);
212 214
213 if (speed >= XFER_SW_DMA_0) 215 if (speed >= XFER_SW_DMA_0)
214 pio = atiixp_dma_2_pio(speed); 216 pio = atiixp_dma_2_pio(speed);
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index 61f1a9665a7f..381cc6f101ce 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -123,7 +123,7 @@ struct via82cxxx_dev
123static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) 123static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
124{ 124{
125 struct pci_dev *dev = hwif->pci_dev; 125 struct pci_dev *dev = hwif->pci_dev;
126 struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif); 126 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
127 u8 t; 127 u8 t;
128 128
129 if (~vdev->via_config->flags & VIA_BAD_AST) { 129 if (~vdev->via_config->flags & VIA_BAD_AST) {
@@ -162,7 +162,7 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
162static int via_set_drive(ide_drive_t *drive, u8 speed) 162static int via_set_drive(ide_drive_t *drive, u8 speed)
163{ 163{
164 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1); 164 ide_drive_t *peer = HWIF(drive)->drives + (~drive->dn & 1);
165 struct via82cxxx_dev *vdev = ide_get_hwifdata(drive->hwif); 165 struct via82cxxx_dev *vdev = pci_get_drvdata(drive->hwif->pci_dev);
166 struct ide_timing t, p; 166 struct ide_timing t, p;
167 unsigned int T, UT; 167 unsigned int T, UT;
168 168
@@ -225,7 +225,7 @@ static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
225static int via82cxxx_ide_dma_check (ide_drive_t *drive) 225static int via82cxxx_ide_dma_check (ide_drive_t *drive)
226{ 226{
227 ide_hwif_t *hwif = HWIF(drive); 227 ide_hwif_t *hwif = HWIF(drive);
228 struct via82cxxx_dev *vdev = ide_get_hwifdata(hwif); 228 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
229 u16 w80 = hwif->udma_four; 229 u16 w80 = hwif->udma_four;
230 230
231 u16 speed = ide_find_best_mode(drive, 231 u16 speed = ide_find_best_mode(drive,
@@ -262,6 +262,53 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
262 return via_config; 262 return via_config;
263} 263}
264 264
265/*
266 * Check and handle 80-wire cable presence
267 */
268static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
269{
270 int i;
271
272 switch (vdev->via_config->flags & VIA_UDMA) {
273 case VIA_UDMA_66:
274 for (i = 24; i >= 0; i -= 8)
275 if (((u >> (i & 16)) & 8) &&
276 ((u >> i) & 0x20) &&
277 (((u >> i) & 7) < 2)) {
278 /*
279 * 2x PCI clock and
280 * UDMA w/ < 3T/cycle
281 */
282 vdev->via_80w |= (1 << (1 - (i >> 4)));
283 }
284 break;
285
286 case VIA_UDMA_100:
287 for (i = 24; i >= 0; i -= 8)
288 if (((u >> i) & 0x10) ||
289 (((u >> i) & 0x20) &&
290 (((u >> i) & 7) < 4))) {
291 /* BIOS 80-wire bit or
292 * UDMA w/ < 60ns/cycle
293 */
294 vdev->via_80w |= (1 << (1 - (i >> 4)));
295 }
296 break;
297
298 case VIA_UDMA_133:
299 for (i = 24; i >= 0; i -= 8)
300 if (((u >> i) & 0x10) ||
301 (((u >> i) & 0x20) &&
302 (((u >> i) & 7) < 6))) {
303 /* BIOS 80-wire bit or
304 * UDMA w/ < 60ns/cycle
305 */
306 vdev->via_80w |= (1 << (1 - (i >> 4)));
307 }
308 break;
309 }
310}
311
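
Each byte of the UDMA timing dword describes one drive; bytes 3..2 belong to one channel and bytes 1..0 to the other, so (1 << (1 - (i >> 4))) converts a byte's bit offset into the per-channel bit recorded in via_80w. The mapping, checked standalone:

    #include <stdio.h>

    int main(void)
    {
        int i;

        for (i = 24; i >= 0; i -= 8)
            printf("byte at bit %2d -> via_80w bit %d\n", i, 1 - (i >> 4));
        /* bits 24/16 -> 0, bits 8/0 -> 1 */
        return 0;
    }
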
265/** 312/**
266 * init_chipset_via82cxxx - initialization handler 313 * init_chipset_via82cxxx - initialization handler
267 * @dev: PCI device 314 * @dev: PCI device
@@ -274,14 +321,22 @@ static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
274static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name) 321static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const char *name)
275{ 322{
276 struct pci_dev *isa = NULL; 323 struct pci_dev *isa = NULL;
324 struct via82cxxx_dev *vdev;
277 struct via_isa_bridge *via_config; 325 struct via_isa_bridge *via_config;
278 u8 t, v; 326 u8 t, v;
279 unsigned int u; 327 u32 u;
328
329 vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
330 if (!vdev) {
331 printk(KERN_ERR "VP_IDE: out of memory :(\n");
332 return -ENOMEM;
333 }
334 pci_set_drvdata(dev, vdev);
280 335
281 /* 336 /*
282 * Find the ISA bridge to see how good the IDE is. 337 * Find the ISA bridge to see how good the IDE is.
283 */ 338 */
284 via_config = via_config_find(&isa); 339 vdev->via_config = via_config = via_config_find(&isa);
285 340
286 /* We checked this earlier so if it fails here deeep badness 341 /* We checked this earlier so if it fails here deeep badness
287 is involved */ 342 is involved */
@@ -289,16 +344,17 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
289 BUG_ON(!via_config->id); 344 BUG_ON(!via_config->id);
290 345
291 /* 346 /*
292 * Setup or disable Clk66 if appropriate 347 * Detect cable and configure Clk66
293 */ 348 */
349 pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
350
351 via_cable_detect(vdev, u);
294 352
295 if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) { 353 if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) {
296 /* Enable Clk66 */ 354 /* Enable Clk66 */
297 pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
298 pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008); 355 pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
299 } else if (via_config->flags & VIA_BAD_CLK66) { 356 } else if (via_config->flags & VIA_BAD_CLK66) {
300 /* Would cause trouble on 596a and 686 */ 357 /* Would cause trouble on 596a and 686 */
301 pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
302 pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008); 358 pci_write_config_dword(dev, VIA_UDMA_TIMING, u & ~0x80008);
303 } 359 }
304 360
@@ -367,75 +423,11 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
367 return 0; 423 return 0;
368} 424}
369 425
370/*
371 * Check and handle 80-wire cable presence
372 */
373static void __devinit via_cable_detect(struct pci_dev *dev, struct via82cxxx_dev *vdev)
374{
375 unsigned int u;
376 int i;
377 pci_read_config_dword(dev, VIA_UDMA_TIMING, &u);
378
379 switch (vdev->via_config->flags & VIA_UDMA) {
380
381 case VIA_UDMA_66:
382 for (i = 24; i >= 0; i -= 8)
383 if (((u >> (i & 16)) & 8) &&
384 ((u >> i) & 0x20) &&
385 (((u >> i) & 7) < 2)) {
386 /*
387 * 2x PCI clock and
388 * UDMA w/ < 3T/cycle
389 */
390 vdev->via_80w |= (1 << (1 - (i >> 4)));
391 }
392 break;
393
394 case VIA_UDMA_100:
395 for (i = 24; i >= 0; i -= 8)
396 if (((u >> i) & 0x10) ||
397 (((u >> i) & 0x20) &&
398 (((u >> i) & 7) < 4))) {
399 /* BIOS 80-wire bit or
400 * UDMA w/ < 60ns/cycle
401 */
402 vdev->via_80w |= (1 << (1 - (i >> 4)));
403 }
404 break;
405
406 case VIA_UDMA_133:
407 for (i = 24; i >= 0; i -= 8)
408 if (((u >> i) & 0x10) ||
409 (((u >> i) & 0x20) &&
410 (((u >> i) & 7) < 6))) {
411 /* BIOS 80-wire bit or
412 * UDMA w/ < 60ns/cycle
413 */
414 vdev->via_80w |= (1 << (1 - (i >> 4)));
415 }
416 break;
417
418 }
419}
420
421static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) 426static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
422{ 427{
423 struct via82cxxx_dev *vdev = kmalloc(sizeof(struct via82cxxx_dev), 428 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
424 GFP_KERNEL);
425 struct pci_dev *isa = NULL;
426 int i; 429 int i;
427 430
428 if (vdev == NULL) {
429 printk(KERN_ERR "VP_IDE: out of memory :(\n");
430 return;
431 }
432
433 memset(vdev, 0, sizeof(struct via82cxxx_dev));
434 ide_set_hwifdata(hwif, vdev);
435
436 vdev->via_config = via_config_find(&isa);
437 via_cable_detect(hwif->pci_dev, vdev);
438
439 hwif->autodma = 0; 431 hwif->autodma = 0;
440 432
441 hwif->tuneproc = &via82cxxx_tune_drive; 433 hwif->tuneproc = &via82cxxx_tune_drive;
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 2b5d7ab3adf7..4325aac7733d 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -2020,6 +2020,8 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev)
2020 blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); 2020 blk_queue_dma_alignment(sdev->request_queue, (512 - 1));
2021 sdev->use_10_for_rw = 1; 2021 sdev->use_10_for_rw = 1;
2022 2022
2023 if (sdev->type == TYPE_ROM)
2024 sdev->use_10_for_ms = 1;
2023 if (sdev->type == TYPE_DISK && 2025 if (sdev->type == TYPE_DISK &&
2024 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) 2026 lu->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
2025 sdev->skip_ms_page_8 = 1; 2027 sdev->skip_ms_page_8 = 1;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 533193d4e5df..9e0ab048c878 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1088,10 +1088,21 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
1088 *sin = iw_event->local_addr; 1088 *sin = iw_event->local_addr;
1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1089 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1090 *sin = iw_event->remote_addr; 1090 *sin = iw_event->remote_addr;
1091 if (iw_event->status) 1091 switch (iw_event->status) {
1092 event.event = RDMA_CM_EVENT_REJECTED; 1092 case 0:
1093 else
1094 event.event = RDMA_CM_EVENT_ESTABLISHED; 1093 event.event = RDMA_CM_EVENT_ESTABLISHED;
1094 break;
1095 case -ECONNRESET:
1096 case -ECONNREFUSED:
1097 event.event = RDMA_CM_EVENT_REJECTED;
1098 break;
1099 case -ETIMEDOUT:
1100 event.event = RDMA_CM_EVENT_UNREACHABLE;
1101 break;
1102 default:
1103 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
1104 break;
1105 }
1095 break; 1106 break;
1096 case IW_CM_EVENT_ESTABLISHED: 1107 case IW_CM_EVENT_ESTABLISHED:
1097 event.event = RDMA_CM_EVENT_ESTABLISHED; 1108 event.event = RDMA_CM_EVENT_ESTABLISHED;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 81a5cdc5733a..e2e8d329b443 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -209,10 +209,21 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 209 if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
210 if (!ctx->backlog) { 210 if (!ctx->backlog) {
211 ret = -EDQUOT; 211 ret = -EDQUOT;
212 kfree(uevent);
212 goto out; 213 goto out;
213 } 214 }
214 ctx->backlog--; 215 ctx->backlog--;
216 } else if (!ctx->uid) {
217 /*
218 * We ignore events for new connections until userspace has set
219 * its context. This can only happen if an error occurs on a
220 * new connection before the user accepts it. This is okay,
221 * since the accept will just fail later.
222 */
223 kfree(uevent);
224 goto out;
215 } 225 }
226
216 list_add_tail(&uevent->list, &ctx->file->event_list); 227 list_add_tail(&uevent->list, &ctx->file->event_list);
217 wake_up_interruptible(&ctx->file->poll_wait); 228 wake_up_interruptible(&ctx->file->poll_wait);
218out: 229out:
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index e1b618c5f685..b7be950ab47c 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -50,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
50 ib_device); 50 ib_device);
51 struct hipz_query_hca *rblock; 51 struct hipz_query_hca *rblock;
52 52
53 rblock = ehca_alloc_fw_ctrlblock(); 53 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
54 if (!rblock) { 54 if (!rblock) {
55 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 55 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
56 return -ENOMEM; 56 return -ENOMEM;
@@ -110,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
110 ib_device); 110 ib_device);
111 struct hipz_query_port *rblock; 111 struct hipz_query_port *rblock;
112 112
113 rblock = ehca_alloc_fw_ctrlblock(); 113 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
114 if (!rblock) { 114 if (!rblock) {
115 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 115 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
116 return -ENOMEM; 116 return -ENOMEM;
@@ -179,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
179 return -EINVAL; 179 return -EINVAL;
180 } 180 }
181 181
182 rblock = ehca_alloc_fw_ctrlblock(); 182 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
183 if (!rblock) { 183 if (!rblock) {
184 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 184 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
185 return -ENOMEM; 185 return -ENOMEM;
@@ -212,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
212 return -EINVAL; 212 return -EINVAL;
213 } 213 }
214 214
215 rblock = ehca_alloc_fw_ctrlblock(); 215 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
216 if (!rblock) { 216 if (!rblock) {
217 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 217 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
218 return -ENOMEM; 218 return -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index c3ea746e9045..e7209afb4250 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -138,7 +138,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
138 u64 *rblock; 138 u64 *rblock;
139 unsigned long block_count; 139 unsigned long block_count;
140 140
141 rblock = ehca_alloc_fw_ctrlblock(); 141 rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
142 if (!rblock) { 142 if (!rblock) {
143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); 143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
144 ret = -ENOMEM; 144 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 3720e3032cce..cd7789f0d08e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -180,10 +180,10 @@ int ehca_mmap_register(u64 physical,void **mapped,
180int ehca_munmap(unsigned long addr, size_t len); 180int ehca_munmap(unsigned long addr, size_t len);
181 181
182#ifdef CONFIG_PPC_64K_PAGES 182#ifdef CONFIG_PPC_64K_PAGES
183void *ehca_alloc_fw_ctrlblock(void); 183void *ehca_alloc_fw_ctrlblock(gfp_t flags);
184void ehca_free_fw_ctrlblock(void *ptr); 184void ehca_free_fw_ctrlblock(void *ptr);
185#else 185#else
186#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL)) 186#define ehca_alloc_fw_ctrlblock(flags) ((void *) get_zeroed_page(flags))
187#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr)) 187#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
188#endif 188#endif
189 189
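
Threading a gfp_t through the allocator is what lets ehca_error_data() (called from interrupt context, see the ehca_irq.c hunk above) pass GFP_ATOMIC while process-context callers keep GFP_KERNEL. A condensed sketch of the pattern (illustrative wrapper, not driver code):

    /* ctrlblock_for() is hypothetical; it only shows how the caller's
     * context selects the flags now accepted by ehca_alloc_fw_ctrlblock(). */
    static void *ctrlblock_for(int in_irq_path)
    {
        return ehca_alloc_fw_ctrlblock(in_irq_path ? GFP_ATOMIC : GFP_KERNEL);
    }
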
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index cc47e4c13a18..6574fbbaead5 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -106,9 +106,9 @@ static struct timer_list poll_eqs_timer;
106#ifdef CONFIG_PPC_64K_PAGES 106#ifdef CONFIG_PPC_64K_PAGES
107static struct kmem_cache *ctblk_cache = NULL; 107static struct kmem_cache *ctblk_cache = NULL;
108 108
109void *ehca_alloc_fw_ctrlblock(void) 109void *ehca_alloc_fw_ctrlblock(gfp_t flags)
110{ 110{
111 void *ret = kmem_cache_zalloc(ctblk_cache, GFP_KERNEL); 111 void *ret = kmem_cache_zalloc(ctblk_cache, flags);
112 if (!ret) 112 if (!ret)
113 ehca_gen_err("Out of memory for ctblk"); 113 ehca_gen_err("Out of memory for ctblk");
114 return ret; 114 return ret;
@@ -206,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
206 u64 h_ret; 206 u64 h_ret;
207 struct hipz_query_hca *rblock; 207 struct hipz_query_hca *rblock;
208 208
209 rblock = ehca_alloc_fw_ctrlblock(); 209 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
210 if (!rblock) { 210 if (!rblock) {
211 ehca_gen_err("Cannot allocate rblock memory."); 211 ehca_gen_err("Cannot allocate rblock memory.");
212 return -ENOMEM; 212 return -ENOMEM;
@@ -258,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
258 int ret = 0; 258 int ret = 0;
259 struct hipz_query_hca *rblock; 259 struct hipz_query_hca *rblock;
260 260
261 rblock = ehca_alloc_fw_ctrlblock(); 261 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
262 if (!rblock) { 262 if (!rblock) {
263 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 263 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
264 return -ENOMEM; 264 return -ENOMEM;
@@ -469,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
469 \ 469 \
470 shca = dev->driver_data; \ 470 shca = dev->driver_data; \
471 \ 471 \
472 rblock = ehca_alloc_fw_ctrlblock(); \ 472 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
473 if (!rblock) { \ 473 if (!rblock) { \
474 dev_err(dev, "Can't allocate rblock memory."); \ 474 dev_err(dev, "Can't allocate rblock memory."); \
475 return 0; \ 475 return 0; \
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 0a5e2214cc5f..cfb362a1029c 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1013 u32 i; 1013 u32 i;
1014 u64 *kpage; 1014 u64 *kpage;
1015 1015
1016 kpage = ehca_alloc_fw_ctrlblock(); 1016 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1017 if (!kpage) { 1017 if (!kpage) {
1018 ehca_err(&shca->ib_device, "kpage alloc failed"); 1018 ehca_err(&shca->ib_device, "kpage alloc failed");
1019 ret = -ENOMEM; 1019 ret = -ENOMEM;
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1124 ehca_mrmw_map_acl(acl, &hipz_acl); 1124 ehca_mrmw_map_acl(acl, &hipz_acl);
1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1126 1126
1127 kpage = ehca_alloc_fw_ctrlblock(); 1127 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1128 if (!kpage) { 1128 if (!kpage) {
1129 ehca_err(&shca->ib_device, "kpage alloc failed"); 1129 ehca_err(&shca->ib_device, "kpage alloc failed");
1130 ret = -ENOMEM; 1130 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index c6c9cef203e3..34b85556d01e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -807,7 +807,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
807 unsigned long spl_flags = 0; 807 unsigned long spl_flags = 0;
808 808
809 /* do query_qp to obtain current attr values */ 809 /* do query_qp to obtain current attr values */
810 mqpcb = ehca_alloc_fw_ctrlblock(); 810 mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
811 if (!mqpcb) { 811 if (!mqpcb) {
812 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " 812 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
813 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); 813 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1273,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
1273 return -EINVAL; 1273 return -EINVAL;
1274 } 1274 }
1275 1275
1276 qpcb = ehca_alloc_fw_ctrlblock(); 1276 qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1277 if (!qpcb) { 1277 if (!qpcb) {
1278 ehca_err(qp->device,"Out of memory for qpcb " 1278 ehca_err(qp->device,"Out of memory for qpcb "
1279 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); 1279 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 283d50b76c3d..1159c8a0f2c5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -54,6 +54,10 @@ enum {
54 MTHCA_CQ_ENTRY_SIZE = 0x20 54 MTHCA_CQ_ENTRY_SIZE = 0x20
55}; 55};
56 56
57enum {
58 MTHCA_ATOMIC_BYTE_LEN = 8
59};
60
57/* 61/*
58 * Must be packed because start is 64 bits but only aligned to 32 bits. 62 * Must be packed because start is 64 bits but only aligned to 32 bits.
59 */ 63 */
@@ -599,11 +603,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
599 break; 603 break;
600 case MTHCA_OPCODE_ATOMIC_CS: 604 case MTHCA_OPCODE_ATOMIC_CS:
601 entry->opcode = IB_WC_COMP_SWAP; 605 entry->opcode = IB_WC_COMP_SWAP;
602 entry->byte_len = be32_to_cpu(cqe->byte_cnt); 606 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
603 break; 607 break;
604 case MTHCA_OPCODE_ATOMIC_FA: 608 case MTHCA_OPCODE_ATOMIC_FA:
605 entry->opcode = IB_WC_FETCH_ADD; 609 entry->opcode = IB_WC_FETCH_ADD;
606 entry->byte_len = be32_to_cpu(cqe->byte_cnt); 610 entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
607 break; 611 break;
608 case MTHCA_OPCODE_BIND_MW: 612 case MTHCA_OPCODE_BIND_MW:
609 entry->opcode = IB_WC_BIND_MW; 613 entry->opcode = IB_WC_BIND_MW;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 15cc2f6eb475..6b19645d946c 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -232,7 +232,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj)
232 232
233 list_for_each_entry(chunk, &icm->chunk_list, list) { 233 list_for_each_entry(chunk, &icm->chunk_list, list) {
234 for (i = 0; i < chunk->npages; ++i) { 234 for (i = 0; i < chunk->npages; ++i) {
235 if (chunk->mem[i].length >= offset) { 235 if (chunk->mem[i].length > offset) {
236 page = chunk->mem[i].page; 236 page = chunk->mem[i].page;
237 goto out; 237 goto out;
238 } 238 }
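
The strict comparison fixes an off-by-one in the chunk walk: when the remaining offset equals a chunk's length, the object begins at byte 0 of the next chunk, so '>=' would return the wrong page. The same walk in standalone form (chunk sizes illustrative):

    #include <stdio.h>

    int main(void)
    {
        int lengths[] = { 4096, 4096 };
        int offset = 4096, i;    /* object starts exactly at chunk 1 */

        for (i = 0; i < 2; i++) {
            if (lengths[i] > offset) {    /* '>=' would stop at chunk 0 */
                printf("chunk %d, offset %d\n", i, offset);
                return 0;
            }
            offset -= lengths[i];
        }
        return 1;
    }
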
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d844a2569b47..5f5214c0337d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -429,13 +429,18 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
429{ 429{
430 struct mthca_dev *dev = to_mdev(ibqp->device); 430 struct mthca_dev *dev = to_mdev(ibqp->device);
431 struct mthca_qp *qp = to_mqp(ibqp); 431 struct mthca_qp *qp = to_mqp(ibqp);
432 int err; 432 int err = 0;
433 struct mthca_mailbox *mailbox; 433 struct mthca_mailbox *mailbox = NULL;
434 struct mthca_qp_param *qp_param; 434 struct mthca_qp_param *qp_param;
435 struct mthca_qp_context *context; 435 struct mthca_qp_context *context;
436 int mthca_state; 436 int mthca_state;
437 u8 status; 437 u8 status;
438 438
439 if (qp->state == IB_QPS_RESET) {
440 qp_attr->qp_state = IB_QPS_RESET;
441 goto done;
442 }
443
439 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 444 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
440 if (IS_ERR(mailbox)) 445 if (IS_ERR(mailbox))
441 return PTR_ERR(mailbox); 446 return PTR_ERR(mailbox);
@@ -454,7 +459,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
454 mthca_state = be32_to_cpu(context->flags) >> 28; 459 mthca_state = be32_to_cpu(context->flags) >> 28;
455 460
456 qp_attr->qp_state = to_ib_qp_state(mthca_state); 461 qp_attr->qp_state = to_ib_qp_state(mthca_state);
457 qp_attr->cur_qp_state = qp_attr->qp_state;
458 qp_attr->path_mtu = context->mtu_msgmax >> 5; 462 qp_attr->path_mtu = context->mtu_msgmax >> 5;
459 qp_attr->path_mig_state = 463 qp_attr->path_mig_state =
460 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 464 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@@ -464,11 +468,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
464 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff; 468 qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
465 qp_attr->qp_access_flags = 469 qp_attr->qp_access_flags =
466 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 470 to_ib_qp_access_flags(be32_to_cpu(context->params2));
467 qp_attr->cap.max_send_wr = qp->sq.max;
468 qp_attr->cap.max_recv_wr = qp->rq.max;
469 qp_attr->cap.max_send_sge = qp->sq.max_gs;
470 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
471 qp_attr->cap.max_inline_data = qp->max_inline_data;
472 471
473 if (qp->transport == RC || qp->transport == UC) { 472 if (qp->transport == RC || qp->transport == UC) {
474 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 473 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
@@ -495,7 +494,16 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
495 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 494 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
496 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; 495 qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5;
497 qp_attr->alt_timeout = context->alt_path.ackto >> 3; 496 qp_attr->alt_timeout = context->alt_path.ackto >> 3;
498 qp_init_attr->cap = qp_attr->cap; 497
498done:
499 qp_attr->cur_qp_state = qp_attr->qp_state;
500 qp_attr->cap.max_send_wr = qp->sq.max;
501 qp_attr->cap.max_recv_wr = qp->rq.max;
502 qp_attr->cap.max_send_sge = qp->sq.max_gs;
503 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
504 qp_attr->cap.max_inline_data = qp->max_inline_data;
505
506 qp_init_attr->cap = qp_attr->cap;
499 507
500out: 508out:
501 mthca_free_mailbox(dev, mailbox); 509 mthca_free_mailbox(dev, mailbox);
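
With the early-out, querying a QP still in RESET no longer issues a firmware QUERY_QP (whose context would be stale); the capability fields moved below the done: label because they come from driver state and are valid in every QP state. A hypothetical caller, for illustration only:

    /* check_reset_qp() is illustrative; error handling omitted. */
    static void check_reset_qp(struct ib_qp *qp)
    {
        struct ib_qp_attr attr;
        struct ib_qp_init_attr init_attr;

        if (!ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr))
            printk(KERN_INFO "state %d, max_send_wr %d\n",
                   attr.qp_state, attr.cap.max_send_wr);
    }
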
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9b2041e25d59..dd221eda3ea6 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
177 * - if yes, the mtask is recycled at iscsi_complete_pdu 177 * - if yes, the mtask is recycled at iscsi_complete_pdu
178 * - if no, the mtask is recycled at iser_snd_completion 178 * - if no, the mtask is recycled at iser_snd_completion
179 */ 179 */
180 if (error && error != -EAGAIN) 180 if (error && error != -ENOBUFS)
181 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 181 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
182 182
183 return error; 183 return error;
@@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn,
241 error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); 241 error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask);
242 242
243 iscsi_iser_ctask_xmit_exit: 243 iscsi_iser_ctask_xmit_exit:
244 if (error && error != -EAGAIN) 244 if (error && error != -ENOBUFS)
245 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); 245 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
246 return error; 246 return error;
247} 247}
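
Both hunks in this file change the sentinel for a full send queue from -EAGAIN to -ENOBUFS, so these exit paths can tell a transient out-of-buffers condition apart from a genuine transport error; only the latter escalates to a connection failure. A sketch of that caller policy, with illustrative names:

#include <errno.h>

static int failures;

static void conn_fail(void)                /* stands in for iscsi_conn_failure */
{
        ++failures;
}

static int classify_xmit_error(int error)
{
        if (error && error != -ENOBUFS)
                conn_fail();               /* -ENOBUFS is simply retried later */
        return error;
}

int main(void)
{
        classify_xmit_error(-ENOBUFS);     /* transient: connection survives */
        classify_xmit_error(-EIO);         /* hard error: connection torn down */
        return failures == 1 ? 0 : 1;
}
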
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index e73c87b9be43..0a7d1ab60e6d 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
304static int 304static int
305iser_check_xmit(struct iscsi_conn *conn, void *task) 305iser_check_xmit(struct iscsi_conn *conn, void *task)
306{ 306{
307 int rc = 0;
308 struct iscsi_iser_conn *iser_conn = conn->dd_data; 307 struct iscsi_iser_conn *iser_conn = conn->dd_data;
309 308
310 write_lock_bh(conn->recv_lock);
311 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == 309 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
312 ISER_QP_MAX_REQ_DTOS) { 310 ISER_QP_MAX_REQ_DTOS) {
313 iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task); 311 iser_dbg("%ld can't xmit task %p\n",jiffies,task);
314 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); 312 return -ENOBUFS;
315 rc = -EAGAIN;
316 } 313 }
317 write_unlock_bh(conn->recv_lock); 314 return 0;
318 return rc;
319} 315}
320 316
321 317
@@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn,
340 return -EPERM; 336 return -EPERM;
341 } 337 }
342 if (iser_check_xmit(conn, ctask)) 338 if (iser_check_xmit(conn, ctask))
343 return -EAGAIN; 339 return -ENOBUFS;
344 340
345 edtl = ntohl(hdr->data_length); 341 edtl = ntohl(hdr->data_length);
346 342
@@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
426 } 422 }
427 423
428 if (iser_check_xmit(conn, ctask)) 424 if (iser_check_xmit(conn, ctask))
429 return -EAGAIN; 425 return -ENOBUFS;
430 426
431 itt = ntohl(hdr->itt); 427 itt = ntohl(hdr->itt);
432 data_seg_len = ntoh24(hdr->dlength); 428 data_seg_len = ntoh24(hdr->dlength);
@@ -498,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn,
498 } 494 }
499 495
500 if (iser_check_xmit(conn,mtask)) 496 if (iser_check_xmit(conn,mtask))
501 return -EAGAIN; 497 return -ENOBUFS;
502 498
503 /* build the tx desc regd header and add it to the tx desc dto */ 499 /* build the tx desc regd header and add it to the tx desc dto */
504 mdesc->type = ISCSI_TX_CONTROL; 500 mdesc->type = ISCSI_TX_CONTROL;
@@ -605,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
605 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; 601 struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
606 struct iscsi_conn *conn = iser_conn->iscsi_conn; 602 struct iscsi_conn *conn = iser_conn->iscsi_conn;
607 struct iscsi_mgmt_task *mtask; 603 struct iscsi_mgmt_task *mtask;
604 int resume_tx = 0;
608 605
609 iser_dbg("Initiator, Data sent dto=0x%p\n", dto); 606 iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
610 607
@@ -613,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc)
613 if (tx_desc->type == ISCSI_TX_DATAOUT) 610 if (tx_desc->type == ISCSI_TX_DATAOUT)
614 kmem_cache_free(ig.desc_cache, tx_desc); 611 kmem_cache_free(ig.desc_cache, tx_desc);
615 612
613 if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
614 ISER_QP_MAX_REQ_DTOS)
615 resume_tx = 1;
616
616 atomic_dec(&ib_conn->post_send_buf_count); 617 atomic_dec(&ib_conn->post_send_buf_count);
617 618
618 write_lock(conn->recv_lock); 619 if (resume_tx) {
619 if (conn->suspend_tx) {
620 iser_dbg("%ld resuming tx\n",jiffies); 620 iser_dbg("%ld resuming tx\n",jiffies);
621 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
622 scsi_queue_work(conn->session->host, &conn->xmitwork); 621 scsi_queue_work(conn->session->host, &conn->xmitwork);
623 } 622 }
624 write_unlock(conn->recv_lock);
625 623
626 if (tx_desc->type == ISCSI_TX_CONTROL) { 624 if (tx_desc->type == ISCSI_TX_CONTROL) {
627 /* this arithmetic is legal by libiscsi dd_data allocation */ 625 /* this arithmetic is legal by libiscsi dd_data allocation */
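
With this rewrite the suspend_tx bit and recv_lock are gone; the atomic post_send_buf_count alone throttles transmission. A send is refused at the ceiling, and the completion that takes the counter off the ceiling requeues the transmit work. The shape of that protocol in C11 atomics; the increment on post is folded into the check here for brevity, and all names are assumed:

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>

#define MAX_POSTED 8                       /* stands in for ISER_QP_MAX_REQ_DTOS */

static atomic_int posted;

static int check_xmit(void)                /* before posting a send */
{
        if (atomic_load(&posted) == MAX_POSTED)
                return -ENOBUFS;
        atomic_fetch_add(&posted, 1);
        return 0;
}

static bool send_completion(void)          /* from the completion handler */
{
        /* full -> not-full edge: the xmit worker must be rescheduled */
        bool resume = atomic_load(&posted) == MAX_POSTED;

        atomic_fetch_sub(&posted, 1);
        return resume;
}

int main(void)
{
        for (int i = 0; i < MAX_POSTED; ++i)
                check_xmit();
        if (check_xmit() != -ENOBUFS)      /* queue full: refuse, no teardown */
                return 1;
        return send_completion() ? 0 : 1;  /* edge seen: resume transmit */
}
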
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 100df6f38d92..91e0c75aca8f 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -52,6 +52,8 @@
52#define KVM_MAX_VCPUS 1 52#define KVM_MAX_VCPUS 1
53#define KVM_MEMORY_SLOTS 4 53#define KVM_MEMORY_SLOTS 4
54#define KVM_NUM_MMU_PAGES 256 54#define KVM_NUM_MMU_PAGES 256
55#define KVM_MIN_FREE_MMU_PAGES 5
56#define KVM_REFILL_PAGES 25
55 57
56#define FX_IMAGE_SIZE 512 58#define FX_IMAGE_SIZE 512
57#define FX_IMAGE_ALIGN 16 59#define FX_IMAGE_ALIGN 16
@@ -89,14 +91,54 @@ typedef unsigned long hva_t;
89typedef u64 hpa_t; 91typedef u64 hpa_t;
90typedef unsigned long hfn_t; 92typedef unsigned long hfn_t;
91 93
94#define NR_PTE_CHAIN_ENTRIES 5
95
96struct kvm_pte_chain {
97 u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
98 struct hlist_node link;
99};
100
101/*
102 * kvm_mmu_page_role, below, is defined as:
103 *
104 * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
105 * bits 4:7 - page table level for this shadow (1-4)
106 * bits 8:9 - page table quadrant for 2-level guests
107 * bit 16 - "metaphysical" - gfn is not a real page (huge page/real mode)
108 */
109union kvm_mmu_page_role {
110 unsigned word;
111 struct {
112 unsigned glevels : 4;
113 unsigned level : 4;
114 unsigned quadrant : 2;
115 unsigned pad_for_nice_hex_output : 6;
116 unsigned metaphysical : 1;
117 };
118};
119
92struct kvm_mmu_page { 120struct kvm_mmu_page {
93 struct list_head link; 121 struct list_head link;
122 struct hlist_node hash_link;
123
124 /*
125 * The following two entries are used to key the shadow page in the
126 * hash table.
127 */
128 gfn_t gfn;
129 union kvm_mmu_page_role role;
130
94 hpa_t page_hpa; 131 hpa_t page_hpa;
95 unsigned long slot_bitmap; /* One bit set per slot which has memory 132 unsigned long slot_bitmap; /* One bit set per slot which has memory
96 * in this shadow page. 133 * in this shadow page.
97 */ 134 */
98 int global; /* Set if all ptes in this page are global */ 135 int global; /* Set if all ptes in this page are global */
99 u64 *parent_pte; 136 int multimapped; /* More than one parent_pte? */
137 int root_count; /* Currently serving as active root */
138 union {
139 u64 *parent_pte; /* !multimapped */
140 struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
141 };
100}; 142};
101 143
102struct vmcs { 144struct vmcs {
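
The role union packs everything that distinguishes one shadow page from another into a single word, so a shadow page can be keyed, and later found, by the pair (gfn, role.word). A standalone illustration of the layout described in the comment above (only the pad field is renamed; nothing else is assumed):

#include <assert.h>

union mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;      /* guest paging levels */
                unsigned level : 4;        /* level of this shadow table */
                unsigned quadrant : 2;     /* for 2-level guests, see mmu.c */
                unsigned pad : 6;
                unsigned metaphysical : 1;
        };
};

int main(void)
{
        union mmu_page_role a = { .word = 0 }, b = { .word = 0 };

        a.glevels = 2; a.level = 1; a.quadrant = 1;
        b.glevels = 2; b.level = 1; b.quadrant = 1;
        assert(a.word == b.word);          /* identical roles can share a page */
        b.quadrant = 0;
        assert(a.word != b.word);          /* any differing field forces a new one */
        return 0;
}
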
@@ -117,14 +159,26 @@ struct kvm_vcpu;
117struct kvm_mmu { 159struct kvm_mmu {
118 void (*new_cr3)(struct kvm_vcpu *vcpu); 160 void (*new_cr3)(struct kvm_vcpu *vcpu);
119 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err); 161 int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
120 void (*inval_page)(struct kvm_vcpu *vcpu, gva_t gva);
121 void (*free)(struct kvm_vcpu *vcpu); 162 void (*free)(struct kvm_vcpu *vcpu);
122 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva); 163 gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
123 hpa_t root_hpa; 164 hpa_t root_hpa;
124 int root_level; 165 int root_level;
125 int shadow_root_level; 166 int shadow_root_level;
167
168 u64 *pae_root;
169};
170
171#define KVM_NR_MEM_OBJS 20
172
173struct kvm_mmu_memory_cache {
174 int nobjs;
175 void *objects[KVM_NR_MEM_OBJS];
126}; 176};
127 177
178/*
179 * We don't want allocation failures within the mmu code, so we preallocate
180 * enough memory for a single page fault in a cache.
181 */
128struct kvm_guest_debug { 182struct kvm_guest_debug {
129 int enabled; 183 int enabled;
130 unsigned long bp[4]; 184 unsigned long bp[4];
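
The cache exists so the fault path never has to cope with a failed allocation: it is stocked ahead of time and then consumed from a context where failure would be awkward (the comment placed just below this hunk states the intent). A simplified userspace model of the topup/consume split; the kernel version refills to full capacity once it drops below the minimum:

#include <assert.h>
#include <stdlib.h>

#define NR_MEM_OBJS 20

struct memory_cache {
        int nobjs;
        void *objects[NR_MEM_OBJS];
};

/* stock the cache where allocating may fail gracefully */
static int topup(struct memory_cache *mc, size_t size, int min)
{
        while (mc->nobjs < min) {
                void *obj = calloc(1, size);

                if (!obj)
                        return -1;
                mc->objects[mc->nobjs++] = obj;
        }
        return 0;
}

/* consume where failure is not an option: a topped-up cache cannot run dry */
static void *cache_alloc(struct memory_cache *mc)
{
        assert(mc->nobjs > 0);
        return mc->objects[--mc->nobjs];
}

int main(void)
{
        struct memory_cache mc = { 0 };

        if (topup(&mc, 64, 4))
                return 1;
        free(cache_alloc(&mc));
        return 0;
}
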
@@ -173,6 +227,7 @@ struct kvm_vcpu {
173 struct mutex mutex; 227 struct mutex mutex;
174 int cpu; 228 int cpu;
175 int launched; 229 int launched;
230 int interrupt_window_open;
176 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 231 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
177#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) 232#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
178 unsigned long irq_pending[NR_IRQ_WORDS]; 233 unsigned long irq_pending[NR_IRQ_WORDS];
@@ -184,6 +239,7 @@ struct kvm_vcpu {
184 unsigned long cr3; 239 unsigned long cr3;
185 unsigned long cr4; 240 unsigned long cr4;
186 unsigned long cr8; 241 unsigned long cr8;
242 u64 pdptrs[4]; /* pae */
187 u64 shadow_efer; 243 u64 shadow_efer;
188 u64 apic_base; 244 u64 apic_base;
189 int nmsrs; 245 int nmsrs;
@@ -194,6 +250,12 @@ struct kvm_vcpu {
194 struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES]; 250 struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
195 struct kvm_mmu mmu; 251 struct kvm_mmu mmu;
196 252
253 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
254 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
255
256 gfn_t last_pt_write_gfn;
257 int last_pt_write_count;
258
197 struct kvm_guest_debug guest_debug; 259 struct kvm_guest_debug guest_debug;
198 260
199 char fx_buf[FX_BUF_SIZE]; 261 char fx_buf[FX_BUF_SIZE];
@@ -231,10 +293,16 @@ struct kvm {
231 spinlock_t lock; /* protects everything except vcpus */ 293 spinlock_t lock; /* protects everything except vcpus */
232 int nmemslots; 294 int nmemslots;
233 struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS]; 295 struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
296 /*
297 * Hash table of struct kvm_mmu_page.
298 */
234 struct list_head active_mmu_pages; 299 struct list_head active_mmu_pages;
300 int n_free_mmu_pages;
301 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
235 struct kvm_vcpu vcpus[KVM_MAX_VCPUS]; 302 struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
236 int memory_config_version; 303 int memory_config_version;
237 int busy; 304 int busy;
305 unsigned long rmap_overflow;
238}; 306};
239 307
240struct kvm_stat { 308struct kvm_stat {
@@ -247,6 +315,9 @@ struct kvm_stat {
247 u32 io_exits; 315 u32 io_exits;
248 u32 mmio_exits; 316 u32 mmio_exits;
249 u32 signal_exits; 317 u32 signal_exits;
318 u32 irq_window_exits;
319 u32 halt_exits;
320 u32 request_irq_exits;
250 u32 irq_exits; 321 u32 irq_exits;
251}; 322};
252 323
@@ -279,6 +350,7 @@ struct kvm_arch_ops {
279 void (*set_segment)(struct kvm_vcpu *vcpu, 350 void (*set_segment)(struct kvm_vcpu *vcpu,
280 struct kvm_segment *var, int seg); 351 struct kvm_segment *var, int seg);
281 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); 352 void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
353 void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
282 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); 354 void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
283 void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu, 355 void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
284 unsigned long cr0); 356 unsigned long cr0);
@@ -323,7 +395,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu);
323int kvm_mmu_setup(struct kvm_vcpu *vcpu); 395int kvm_mmu_setup(struct kvm_vcpu *vcpu);
324 396
325int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); 397int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
326void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); 398void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);
327 399
328hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa); 400hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
329#define HPA_MSB ((sizeof(hpa_t) * 8) - 1) 401#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
@@ -396,6 +468,19 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
396 468
397unsigned long segment_base(u16 selector); 469unsigned long segment_base(u16 selector);
398 470
471void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
472void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
473int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
474void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
475
476static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
477 u32 error_code)
478{
479 if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
480 kvm_mmu_free_some_pages(vcpu);
481 return vcpu->mmu.page_fault(vcpu, gva, error_code);
482}
483
399static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn) 484static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
400{ 485{
401 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); 486 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
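
The inline above implements a low-watermark policy: reclaim only when free shadow pages dip below KVM_MIN_FREE_MMU_PAGES, before the fault handler consumes more. The separate KVM_REFILL_PAGES constant suggests that kvm_mmu_free_some_pages (whose body lives in mmu.c, outside this diff) reclaims well past the minimum so reclaim does not fire on every subsequent fault; the sketch below assumes that reading:

#include <assert.h>

#define MIN_FREE 5                         /* KVM_MIN_FREE_MMU_PAGES */
#define REFILL  25                         /* KVM_REFILL_PAGES (assumed target) */

static int n_free = 2;

static void free_some_pages(void)
{
        while (n_free < REFILL)
                ++n_free;                  /* stands in for zapping shadow pages */
}

static void page_fault(void)
{
        if (n_free < MIN_FREE)             /* same test as the inline */
                free_some_pages();
        --n_free;                          /* the fault may consume a page */
}

int main(void)
{
        page_fault();
        assert(n_free == REFILL - 1);      /* hysteresis: far above the floor */
        return 0;
}
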
@@ -541,19 +626,4 @@ static inline u32 get_rdx_init_val(void)
541#define TSS_REDIRECTION_SIZE (256 / 8) 626#define TSS_REDIRECTION_SIZE (256 / 8)
542#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1) 627#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
543 628
544#ifdef CONFIG_X86_64
545
546/*
547 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. Therefore
548 * we need to allocate shadow page tables in the first 4GB of memory, which
549 * happens to fit the DMA32 zone.
550 */
551#define GFP_KVM_MMU (GFP_KERNEL | __GFP_DMA32)
552
553#else
554
555#define GFP_KVM_MMU GFP_KERNEL
556
557#endif
558
559#endif 629#endif
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ce7fe640f18d..67c1154960f0 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -58,6 +58,9 @@ static struct kvm_stats_debugfs_item {
58 { "io_exits", &kvm_stat.io_exits }, 58 { "io_exits", &kvm_stat.io_exits },
59 { "mmio_exits", &kvm_stat.mmio_exits }, 59 { "mmio_exits", &kvm_stat.mmio_exits },
60 { "signal_exits", &kvm_stat.signal_exits }, 60 { "signal_exits", &kvm_stat.signal_exits },
61 { "irq_window", &kvm_stat.irq_window_exits },
62 { "halt_exits", &kvm_stat.halt_exits },
63 { "request_irq", &kvm_stat.request_irq_exits },
61 { "irq_exits", &kvm_stat.irq_exits }, 64 { "irq_exits", &kvm_stat.irq_exits },
62 { 0, 0 } 65 { 0, 0 }
63}; 66};
@@ -227,6 +230,7 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
227 struct kvm_vcpu *vcpu = &kvm->vcpus[i]; 230 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
228 231
229 mutex_init(&vcpu->mutex); 232 mutex_init(&vcpu->mutex);
233 vcpu->kvm = kvm;
230 vcpu->mmu.root_hpa = INVALID_PAGE; 234 vcpu->mmu.root_hpa = INVALID_PAGE;
231 INIT_LIST_HEAD(&vcpu->free_pages); 235 INIT_LIST_HEAD(&vcpu->free_pages);
232 } 236 }
@@ -268,8 +272,8 @@ static void kvm_free_physmem(struct kvm *kvm)
268 272
269static void kvm_free_vcpu(struct kvm_vcpu *vcpu) 273static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
270{ 274{
271 kvm_arch_ops->vcpu_free(vcpu);
272 kvm_mmu_destroy(vcpu); 275 kvm_mmu_destroy(vcpu);
276 kvm_arch_ops->vcpu_free(vcpu);
273} 277}
274 278
275static void kvm_free_vcpus(struct kvm *kvm) 279static void kvm_free_vcpus(struct kvm *kvm)
@@ -295,14 +299,17 @@ static void inject_gp(struct kvm_vcpu *vcpu)
295 kvm_arch_ops->inject_gp(vcpu, 0); 299 kvm_arch_ops->inject_gp(vcpu, 0);
296} 300}
297 301
298static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu, 302/*
 299 						unsigned long cr3)	 303 * Load the pae pdptrs.  Return true if they are all valid.
304 */
305static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
300{ 306{
301 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; 307 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
302 unsigned offset = (cr3 & (PAGE_SIZE-1)) >> 5; 308 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
303 int i; 309 int i;
304 u64 pdpte; 310 u64 pdpte;
305 u64 *pdpt; 311 u64 *pdpt;
312 int ret;
306 struct kvm_memory_slot *memslot; 313 struct kvm_memory_slot *memslot;
307 314
308 spin_lock(&vcpu->kvm->lock); 315 spin_lock(&vcpu->kvm->lock);
@@ -310,16 +317,23 @@ static int pdptrs_have_reserved_bits_set(struct kvm_vcpu *vcpu,
310 /* FIXME: !memslot - emulate? 0xff? */ 317 /* FIXME: !memslot - emulate? 0xff? */
311 pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0); 318 pdpt = kmap_atomic(gfn_to_page(memslot, pdpt_gfn), KM_USER0);
312 319
320 ret = 1;
313 for (i = 0; i < 4; ++i) { 321 for (i = 0; i < 4; ++i) {
314 pdpte = pdpt[offset + i]; 322 pdpte = pdpt[offset + i];
315 if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) 323 if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
316 break; 324 ret = 0;
325 goto out;
326 }
317 } 327 }
318 328
329 for (i = 0; i < 4; ++i)
330 vcpu->pdptrs[i] = pdpt[offset + i];
331
332out:
319 kunmap_atomic(pdpt, KM_USER0); 333 kunmap_atomic(pdpt, KM_USER0);
320 spin_unlock(&vcpu->kvm->lock); 334 spin_unlock(&vcpu->kvm->lock);
321 335
322 return i != 4; 336 return ret;
323} 337}
324 338
325void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 339void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
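
The renamed helper also fixes the offset arithmetic. A PAE cr3 is 32-byte aligned, so bits 5..11 select one of 128 pdpte tables within the page, each holding four 8-byte entries; the old expression used the table number directly as an index into an array of u64, while the new << 2 converts it to an entry index. A worked check of the corrected formula:

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* index of the first of the four pdptes selected by a PAE cr3 */
static unsigned pdpte_index(uint32_t cr3)
{
        return ((cr3 & (PAGE_SIZE - 1)) >> 5) << 2;
}

int main(void)
{
        assert(pdpte_index(0x1000) == 0);  /* first 32-byte slot -> pdpt[0..3] */
        assert(pdpte_index(0x1020) == 4);  /* second slot -> pdpt[4..7] */
        assert(pdpte_index(0x10e0) == 28); /* eighth slot -> pdpt[28..31] */
        return 0;
}
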
@@ -365,8 +379,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
365 } 379 }
366 } else 380 } else
367#endif 381#endif
368 if (is_pae(vcpu) && 382 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
369 pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
370 printk(KERN_DEBUG "set_cr0: #GP, pdptrs " 383 printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
371 "reserved bits\n"); 384 "reserved bits\n");
372 inject_gp(vcpu); 385 inject_gp(vcpu);
@@ -387,6 +400,7 @@ EXPORT_SYMBOL_GPL(set_cr0);
387 400
388void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 401void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
389{ 402{
403 kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
390 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); 404 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
391} 405}
392EXPORT_SYMBOL_GPL(lmsw); 406EXPORT_SYMBOL_GPL(lmsw);
@@ -407,7 +421,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
407 return; 421 return;
408 } 422 }
409 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK) 423 } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
410 && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) { 424 && !load_pdptrs(vcpu, vcpu->cr3)) {
411 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); 425 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
412 inject_gp(vcpu); 426 inject_gp(vcpu);
413 } 427 }
@@ -439,7 +453,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
439 return; 453 return;
440 } 454 }
441 if (is_paging(vcpu) && is_pae(vcpu) && 455 if (is_paging(vcpu) && is_pae(vcpu) &&
442 pdptrs_have_reserved_bits_set(vcpu, cr3)) { 456 !load_pdptrs(vcpu, cr3)) {
443 printk(KERN_DEBUG "set_cr3: #GP, pdptrs " 457 printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
444 "reserved bits\n"); 458 "reserved bits\n");
445 inject_gp(vcpu); 459 inject_gp(vcpu);
@@ -449,7 +463,19 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
449 463
450 vcpu->cr3 = cr3; 464 vcpu->cr3 = cr3;
451 spin_lock(&vcpu->kvm->lock); 465 spin_lock(&vcpu->kvm->lock);
452 vcpu->mmu.new_cr3(vcpu); 466 /*
467 * Does the new cr3 value map to physical memory? (Note, we
468 * catch an invalid cr3 even in real-mode, because it would
469 * cause trouble later on when we turn on paging anyway.)
470 *
471 * A real CPU would silently accept an invalid cr3 and would
472 * attempt to use it - with largely undefined (and often hard
473 * to debug) behavior on the guest side.
474 */
475 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
476 inject_gp(vcpu);
477 else
478 vcpu->mmu.new_cr3(vcpu);
453 spin_unlock(&vcpu->kvm->lock); 479 spin_unlock(&vcpu->kvm->lock);
454} 480}
455EXPORT_SYMBOL_GPL(set_cr3); 481EXPORT_SYMBOL_GPL(set_cr3);
@@ -517,7 +543,6 @@ static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
517 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE; 543 vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
518 544
519 vcpu->cpu = -1; /* First load will set up TR */ 545 vcpu->cpu = -1; /* First load will set up TR */
520 vcpu->kvm = kvm;
521 r = kvm_arch_ops->vcpu_create(vcpu); 546 r = kvm_arch_ops->vcpu_create(vcpu);
522 if (r < 0) 547 if (r < 0)
523 goto out_free_vcpus; 548 goto out_free_vcpus;
@@ -634,6 +659,7 @@ raced:
634 | __GFP_ZERO); 659 | __GFP_ZERO);
635 if (!new.phys_mem[i]) 660 if (!new.phys_mem[i])
636 goto out_free; 661 goto out_free;
662 new.phys_mem[i]->private = 0;
637 } 663 }
638 } 664 }
639 665
@@ -688,6 +714,13 @@ out:
688 return r; 714 return r;
689} 715}
690 716
717static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
718{
719 spin_lock(&vcpu->kvm->lock);
720 kvm_mmu_slot_remove_write_access(vcpu, slot);
721 spin_unlock(&vcpu->kvm->lock);
722}
723
691/* 724/*
692 * Get (and clear) the dirty memory log for a memory slot. 725 * Get (and clear) the dirty memory log for a memory slot.
693 */ 726 */
@@ -697,6 +730,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
697 struct kvm_memory_slot *memslot; 730 struct kvm_memory_slot *memslot;
698 int r, i; 731 int r, i;
699 int n; 732 int n;
733 int cleared;
700 unsigned long any = 0; 734 unsigned long any = 0;
701 735
702 spin_lock(&kvm->lock); 736 spin_lock(&kvm->lock);
@@ -727,15 +761,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
727 761
728 762
729 if (any) { 763 if (any) {
730 spin_lock(&kvm->lock); 764 cleared = 0;
731 kvm_mmu_slot_remove_write_access(kvm, log->slot);
732 spin_unlock(&kvm->lock);
733 memset(memslot->dirty_bitmap, 0, n);
734 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 765 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
735 struct kvm_vcpu *vcpu = vcpu_load(kvm, i); 766 struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
736 767
737 if (!vcpu) 768 if (!vcpu)
738 continue; 769 continue;
770 if (!cleared) {
771 do_remove_write_access(vcpu, log->slot);
772 memset(memslot->dirty_bitmap, 0, n);
773 cleared = 1;
774 }
739 kvm_arch_ops->tlb_flush(vcpu); 775 kvm_arch_ops->tlb_flush(vcpu);
740 vcpu_put(vcpu); 776 vcpu_put(vcpu);
741 } 777 }
@@ -863,6 +899,27 @@ static int emulator_read_emulated(unsigned long addr,
863 } 899 }
864} 900}
865 901
902static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
903 unsigned long val, int bytes)
904{
905 struct kvm_memory_slot *m;
906 struct page *page;
907 void *virt;
908
909 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
910 return 0;
911 m = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
912 if (!m)
913 return 0;
914 page = gfn_to_page(m, gpa >> PAGE_SHIFT);
915 kvm_mmu_pre_write(vcpu, gpa, bytes);
916 virt = kmap_atomic(page, KM_USER0);
917 memcpy(virt + offset_in_page(gpa), &val, bytes);
918 kunmap_atomic(virt, KM_USER0);
919 kvm_mmu_post_write(vcpu, gpa, bytes);
920 return 1;
921}
922
866static int emulator_write_emulated(unsigned long addr, 923static int emulator_write_emulated(unsigned long addr,
867 unsigned long val, 924 unsigned long val,
868 unsigned int bytes, 925 unsigned int bytes,
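
emulator_write_phys commits a write directly into guest memory, bracketed by kvm_mmu_pre_write/kvm_mmu_post_write so shadow entries covering that gpa can be adjusted, but only when the access stays within one page: a single gfn-to-page translation is valid for at most a page, and anything larger falls through to the mmio path by returning 0. A check of the page-cross predicate used above:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* true if [gpa, gpa + bytes) lies within a single page */
static int fits_in_page(uint64_t gpa, int bytes)
{
        return ((gpa + bytes - 1) >> PAGE_SHIFT) == (gpa >> PAGE_SHIFT);
}

int main(void)
{
        assert(fits_in_page(0x1ffc, 4));   /* ends exactly on the boundary */
        assert(!fits_in_page(0x1ffd, 4));  /* spills into the next page */
        return 0;
}
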
@@ -874,6 +931,9 @@ static int emulator_write_emulated(unsigned long addr,
874 if (gpa == UNMAPPED_GVA) 931 if (gpa == UNMAPPED_GVA)
875 return X86EMUL_PROPAGATE_FAULT; 932 return X86EMUL_PROPAGATE_FAULT;
876 933
934 if (emulator_write_phys(vcpu, gpa, val, bytes))
935 return X86EMUL_CONTINUE;
936
877 vcpu->mmio_needed = 1; 937 vcpu->mmio_needed = 1;
878 vcpu->mmio_phys_addr = gpa; 938 vcpu->mmio_phys_addr = gpa;
879 vcpu->mmio_size = bytes; 939 vcpu->mmio_size = bytes;
@@ -898,6 +958,30 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
898 return emulator_write_emulated(addr, new, bytes, ctxt); 958 return emulator_write_emulated(addr, new, bytes, ctxt);
899} 959}
900 960
961#ifdef CONFIG_X86_32
962
963static int emulator_cmpxchg8b_emulated(unsigned long addr,
964 unsigned long old_lo,
965 unsigned long old_hi,
966 unsigned long new_lo,
967 unsigned long new_hi,
968 struct x86_emulate_ctxt *ctxt)
969{
970 static int reported;
971 int r;
972
973 if (!reported) {
974 reported = 1;
975 printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
976 }
977 r = emulator_write_emulated(addr, new_lo, 4, ctxt);
978 if (r != X86EMUL_CONTINUE)
979 return r;
980 return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
981}
982
983#endif
984
901static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) 985static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
902{ 986{
903 return kvm_arch_ops->get_segment_base(vcpu, seg); 987 return kvm_arch_ops->get_segment_base(vcpu, seg);
@@ -905,18 +989,15 @@ static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
905 989
906int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) 990int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
907{ 991{
908 spin_lock(&vcpu->kvm->lock);
909 vcpu->mmu.inval_page(vcpu, address);
910 spin_unlock(&vcpu->kvm->lock);
911 kvm_arch_ops->invlpg(vcpu, address);
912 return X86EMUL_CONTINUE; 992 return X86EMUL_CONTINUE;
913} 993}
914 994
915int emulate_clts(struct kvm_vcpu *vcpu) 995int emulate_clts(struct kvm_vcpu *vcpu)
916{ 996{
917 unsigned long cr0 = vcpu->cr0; 997 unsigned long cr0;
918 998
919 cr0 &= ~CR0_TS_MASK; 999 kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
1000 cr0 = vcpu->cr0 & ~CR0_TS_MASK;
920 kvm_arch_ops->set_cr0(vcpu, cr0); 1001 kvm_arch_ops->set_cr0(vcpu, cr0);
921 return X86EMUL_CONTINUE; 1002 return X86EMUL_CONTINUE;
922} 1003}
@@ -975,6 +1056,9 @@ struct x86_emulate_ops emulate_ops = {
975 .read_emulated = emulator_read_emulated, 1056 .read_emulated = emulator_read_emulated,
976 .write_emulated = emulator_write_emulated, 1057 .write_emulated = emulator_write_emulated,
977 .cmpxchg_emulated = emulator_cmpxchg_emulated, 1058 .cmpxchg_emulated = emulator_cmpxchg_emulated,
1059#ifdef CONFIG_X86_32
1060 .cmpxchg8b_emulated = emulator_cmpxchg8b_emulated,
1061#endif
978}; 1062};
979 1063
980int emulate_instruction(struct kvm_vcpu *vcpu, 1064int emulate_instruction(struct kvm_vcpu *vcpu,
@@ -1024,6 +1108,8 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1024 } 1108 }
1025 1109
1026 if (r) { 1110 if (r) {
1111 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
1112 return EMULATE_DONE;
1027 if (!vcpu->mmio_needed) { 1113 if (!vcpu->mmio_needed) {
1028 report_emulation_failure(&emulate_ctxt); 1114 report_emulation_failure(&emulate_ctxt);
1029 return EMULATE_FAIL; 1115 return EMULATE_FAIL;
@@ -1069,6 +1155,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
1069 1155
1070unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr) 1156unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
1071{ 1157{
1158 kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
1072 switch (cr) { 1159 switch (cr) {
1073 case 0: 1160 case 0:
1074 return vcpu->cr0; 1161 return vcpu->cr0;
@@ -1403,6 +1490,7 @@ static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
1403 sregs->gdt.limit = dt.limit; 1490 sregs->gdt.limit = dt.limit;
1404 sregs->gdt.base = dt.base; 1491 sregs->gdt.base = dt.base;
1405 1492
1493 kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
1406 sregs->cr0 = vcpu->cr0; 1494 sregs->cr0 = vcpu->cr0;
1407 sregs->cr2 = vcpu->cr2; 1495 sregs->cr2 = vcpu->cr2;
1408 sregs->cr3 = vcpu->cr3; 1496 sregs->cr3 = vcpu->cr3;
@@ -1467,11 +1555,15 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
1467#endif 1555#endif
1468 vcpu->apic_base = sregs->apic_base; 1556 vcpu->apic_base = sregs->apic_base;
1469 1557
1558 kvm_arch_ops->decache_cr0_cr4_guest_bits(vcpu);
1559
1470 mmu_reset_needed |= vcpu->cr0 != sregs->cr0; 1560 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
1471 kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0); 1561 kvm_arch_ops->set_cr0_no_modeswitch(vcpu, sregs->cr0);
1472 1562
1473 mmu_reset_needed |= vcpu->cr4 != sregs->cr4; 1563 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
1474 kvm_arch_ops->set_cr4(vcpu, sregs->cr4); 1564 kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
1565 if (!is_long_mode(vcpu) && is_pae(vcpu))
1566 load_pdptrs(vcpu, vcpu->cr3);
1475 1567
1476 if (mmu_reset_needed) 1568 if (mmu_reset_needed)
1477 kvm_mmu_reset_context(vcpu); 1569 kvm_mmu_reset_context(vcpu);
@@ -1693,12 +1785,12 @@ static long kvm_dev_ioctl(struct file *filp,
1693 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run)) 1785 if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
1694 goto out; 1786 goto out;
1695 r = kvm_dev_ioctl_run(kvm, &kvm_run); 1787 r = kvm_dev_ioctl_run(kvm, &kvm_run);
1696 if (r < 0) 1788 if (r < 0 && r != -EINTR)
1697 goto out; 1789 goto out;
1698 r = -EFAULT; 1790 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
1699 if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) 1791 r = -EFAULT;
1700 goto out; 1792 goto out;
1701 r = 0; 1793 }
1702 break; 1794 break;
1703 } 1795 }
1704 case KVM_GET_REGS: { 1796 case KVM_GET_REGS: {
@@ -1842,6 +1934,7 @@ static long kvm_dev_ioctl(struct file *filp,
1842 num_msrs_to_save * sizeof(u32))) 1934 num_msrs_to_save * sizeof(u32)))
1843 goto out; 1935 goto out;
1844 r = 0; 1936 r = 0;
1937 break;
1845 } 1938 }
1846 default: 1939 default:
1847 ; 1940 ;
@@ -1944,17 +2037,17 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
1944 return -EEXIST; 2037 return -EEXIST;
1945 } 2038 }
1946 2039
1947 kvm_arch_ops = ops; 2040 if (!ops->cpu_has_kvm_support()) {
1948
1949 if (!kvm_arch_ops->cpu_has_kvm_support()) {
1950 printk(KERN_ERR "kvm: no hardware support\n"); 2041 printk(KERN_ERR "kvm: no hardware support\n");
1951 return -EOPNOTSUPP; 2042 return -EOPNOTSUPP;
1952 } 2043 }
1953 if (kvm_arch_ops->disabled_by_bios()) { 2044 if (ops->disabled_by_bios()) {
1954 printk(KERN_ERR "kvm: disabled by bios\n"); 2045 printk(KERN_ERR "kvm: disabled by bios\n");
1955 return -EOPNOTSUPP; 2046 return -EOPNOTSUPP;
1956 } 2047 }
1957 2048
2049 kvm_arch_ops = ops;
2050
1958 r = kvm_arch_ops->hardware_setup(); 2051 r = kvm_arch_ops->hardware_setup();
1959 if (r < 0) 2052 if (r < 0)
1960 return r; 2053 return r;
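
The reordering at the end of this file defers the kvm_arch_ops assignment until both hardware checks pass, so a rejected backend is never visible to the rest of the module, not even briefly. The same shape in miniature, with assumed names:

struct arch_ops {
        int (*cpu_has_support)(void);
        int (*disabled_by_bios)(void);
};

static const struct arch_ops *active_ops;  /* consulted module-wide */

static int init_arch(const struct arch_ops *ops)
{
        if (!ops->cpu_has_support())
                return -1;                 /* nothing published yet */
        if (ops->disabled_by_bios())
                return -1;
        active_ops = ops;                  /* publish only after validation */
        return 0;
}

static int yes(void) { return 1; }
static int no(void) { return 0; }

int main(void)
{
        static const struct arch_ops ops = { yes, no };

        return init_arch(&ops);
}
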
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 790423c5f23d..c6f972914f08 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -26,7 +26,31 @@
26#include "vmx.h" 26#include "vmx.h"
27#include "kvm.h" 27#include "kvm.h"
28 28
29#undef MMU_DEBUG
30
31#undef AUDIT
32
33#ifdef AUDIT
34static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
35#else
36static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
37#endif
38
39#ifdef MMU_DEBUG
40
41#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
42#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
43
44#else
45
29#define pgprintk(x...) do { } while (0) 46#define pgprintk(x...) do { } while (0)
47#define rmap_printk(x...) do { } while (0)
48
49#endif
50
51#if defined(MMU_DEBUG) || defined(AUDIT)
52static int dbg = 1;
53#endif
30 54
31#define ASSERT(x) \ 55#define ASSERT(x) \
32 if (!(x)) { \ 56 if (!(x)) { \
@@ -34,8 +58,10 @@
34 __FILE__, __LINE__, #x); \ 58 __FILE__, __LINE__, #x); \
35 } 59 }
36 60
37#define PT64_ENT_PER_PAGE 512 61#define PT64_PT_BITS 9
38#define PT32_ENT_PER_PAGE 1024 62#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
63#define PT32_PT_BITS 10
64#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
39 65
40#define PT_WRITABLE_SHIFT 1 66#define PT_WRITABLE_SHIFT 1
41 67
@@ -125,6 +151,13 @@
125#define PT_DIRECTORY_LEVEL 2 151#define PT_DIRECTORY_LEVEL 2
126#define PT_PAGE_TABLE_LEVEL 1 152#define PT_PAGE_TABLE_LEVEL 1
127 153
154#define RMAP_EXT 4
155
156struct kvm_rmap_desc {
157 u64 *shadow_ptes[RMAP_EXT];
158 struct kvm_rmap_desc *more;
159};
160
128static int is_write_protection(struct kvm_vcpu *vcpu) 161static int is_write_protection(struct kvm_vcpu *vcpu)
129{ 162{
130 return vcpu->cr0 & CR0_WP_MASK; 163 return vcpu->cr0 & CR0_WP_MASK;
@@ -150,32 +183,272 @@ static int is_io_pte(unsigned long pte)
150 return pte & PT_SHADOW_IO_MARK; 183 return pte & PT_SHADOW_IO_MARK;
151} 184}
152 185
186static int is_rmap_pte(u64 pte)
187{
188 return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
189 == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
190}
191
192static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
193 size_t objsize, int min)
194{
195 void *obj;
196
197 if (cache->nobjs >= min)
198 return 0;
199 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
200 obj = kzalloc(objsize, GFP_NOWAIT);
201 if (!obj)
202 return -ENOMEM;
203 cache->objects[cache->nobjs++] = obj;
204 }
205 return 0;
206}
207
208static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
209{
210 while (mc->nobjs)
211 kfree(mc->objects[--mc->nobjs]);
212}
213
214static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
215{
216 int r;
217
218 r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
219 sizeof(struct kvm_pte_chain), 4);
220 if (r)
221 goto out;
222 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
223 sizeof(struct kvm_rmap_desc), 1);
224out:
225 return r;
226}
227
228static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
229{
230 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
231 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
232}
233
234static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
235 size_t size)
236{
237 void *p;
238
239 BUG_ON(!mc->nobjs);
240 p = mc->objects[--mc->nobjs];
241 memset(p, 0, size);
242 return p;
243}
244
245static void mmu_memory_cache_free(struct kvm_mmu_memory_cache *mc, void *obj)
246{
247 if (mc->nobjs < KVM_NR_MEM_OBJS)
248 mc->objects[mc->nobjs++] = obj;
249 else
250 kfree(obj);
251}
252
253static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
254{
255 return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
256 sizeof(struct kvm_pte_chain));
257}
258
259static void mmu_free_pte_chain(struct kvm_vcpu *vcpu,
260 struct kvm_pte_chain *pc)
261{
262 mmu_memory_cache_free(&vcpu->mmu_pte_chain_cache, pc);
263}
264
265static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
266{
267 return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
268 sizeof(struct kvm_rmap_desc));
269}
270
271static void mmu_free_rmap_desc(struct kvm_vcpu *vcpu,
272 struct kvm_rmap_desc *rd)
273{
274 mmu_memory_cache_free(&vcpu->mmu_rmap_desc_cache, rd);
275}
276
277/*
278 * Reverse mapping data structures:
279 *
280 * If page->private bit zero is zero, then page->private points to the
281 * shadow page table entry that points to page_address(page).
282 *
 283 * If page->private bit zero is one, then (page->private & ~1) points
284 * to a struct kvm_rmap_desc containing more mappings.
285 */
286static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
287{
288 struct page *page;
289 struct kvm_rmap_desc *desc;
290 int i;
291
292 if (!is_rmap_pte(*spte))
293 return;
294 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
295 if (!page->private) {
296 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
297 page->private = (unsigned long)spte;
298 } else if (!(page->private & 1)) {
299 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
300 desc = mmu_alloc_rmap_desc(vcpu);
301 desc->shadow_ptes[0] = (u64 *)page->private;
302 desc->shadow_ptes[1] = spte;
303 page->private = (unsigned long)desc | 1;
304 } else {
305 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
306 desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
307 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
308 desc = desc->more;
309 if (desc->shadow_ptes[RMAP_EXT-1]) {
310 desc->more = mmu_alloc_rmap_desc(vcpu);
311 desc = desc->more;
312 }
313 for (i = 0; desc->shadow_ptes[i]; ++i)
314 ;
315 desc->shadow_ptes[i] = spte;
316 }
317}
318
319static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
320 struct page *page,
321 struct kvm_rmap_desc *desc,
322 int i,
323 struct kvm_rmap_desc *prev_desc)
324{
325 int j;
326
327 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
328 ;
329 desc->shadow_ptes[i] = desc->shadow_ptes[j];
330 desc->shadow_ptes[j] = 0;
331 if (j != 0)
332 return;
333 if (!prev_desc && !desc->more)
334 page->private = (unsigned long)desc->shadow_ptes[0];
335 else
336 if (prev_desc)
337 prev_desc->more = desc->more;
338 else
339 page->private = (unsigned long)desc->more | 1;
340 mmu_free_rmap_desc(vcpu, desc);
341}
342
343static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
344{
345 struct page *page;
346 struct kvm_rmap_desc *desc;
347 struct kvm_rmap_desc *prev_desc;
348 int i;
349
350 if (!is_rmap_pte(*spte))
351 return;
352 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
353 if (!page->private) {
354 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
355 BUG();
356 } else if (!(page->private & 1)) {
357 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
358 if ((u64 *)page->private != spte) {
359 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
360 spte, *spte);
361 BUG();
362 }
363 page->private = 0;
364 } else {
365 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
366 desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
367 prev_desc = NULL;
368 while (desc) {
369 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
370 if (desc->shadow_ptes[i] == spte) {
371 rmap_desc_remove_entry(vcpu, page,
372 desc, i,
373 prev_desc);
374 return;
375 }
376 prev_desc = desc;
377 desc = desc->more;
378 }
379 BUG();
380 }
381}
382
383static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
384{
385 struct kvm *kvm = vcpu->kvm;
386 struct page *page;
387 struct kvm_memory_slot *slot;
388 struct kvm_rmap_desc *desc;
389 u64 *spte;
390
391 slot = gfn_to_memslot(kvm, gfn);
392 BUG_ON(!slot);
393 page = gfn_to_page(slot, gfn);
394
395 while (page->private) {
396 if (!(page->private & 1))
397 spte = (u64 *)page->private;
398 else {
399 desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
400 spte = desc->shadow_ptes[0];
401 }
402 BUG_ON(!spte);
403 BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
404 page_to_pfn(page) << PAGE_SHIFT);
405 BUG_ON(!(*spte & PT_PRESENT_MASK));
406 BUG_ON(!(*spte & PT_WRITABLE_MASK));
407 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
408 rmap_remove(vcpu, spte);
409 kvm_arch_ops->tlb_flush(vcpu);
410 *spte &= ~(u64)PT_WRITABLE_MASK;
411 }
412}
413
414static int is_empty_shadow_page(hpa_t page_hpa)
415{
416 u64 *pos;
417 u64 *end;
418
419 for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
420 pos != end; pos++)
421 if (*pos != 0) {
422 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
423 pos, *pos);
424 return 0;
425 }
426 return 1;
427}
428
153static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa) 429static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
154{ 430{
155 struct kvm_mmu_page *page_head = page_header(page_hpa); 431 struct kvm_mmu_page *page_head = page_header(page_hpa);
156 432
433 ASSERT(is_empty_shadow_page(page_hpa));
157 list_del(&page_head->link); 434 list_del(&page_head->link);
158 page_head->page_hpa = page_hpa; 435 page_head->page_hpa = page_hpa;
159 list_add(&page_head->link, &vcpu->free_pages); 436 list_add(&page_head->link, &vcpu->free_pages);
437 ++vcpu->kvm->n_free_mmu_pages;
160} 438}
161 439
162static int is_empty_shadow_page(hpa_t page_hpa) 440static unsigned kvm_page_table_hashfn(gfn_t gfn)
163{ 441{
164 u32 *pos; 442 return gfn;
165 u32 *end;
166 for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u32);
167 pos != end; pos++)
168 if (*pos != 0)
169 return 0;
170 return 1;
171} 443}
172 444
173static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte) 445static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
446 u64 *parent_pte)
174{ 447{
175 struct kvm_mmu_page *page; 448 struct kvm_mmu_page *page;
176 449
177 if (list_empty(&vcpu->free_pages)) 450 if (list_empty(&vcpu->free_pages))
178 return INVALID_PAGE; 451 return NULL;
179 452
180 page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link); 453 page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
181 list_del(&page->link); 454 list_del(&page->link);
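
The comment above describes a tagged-pointer encoding: page->private stores a single shadow-pte pointer while one mapping exists, and flips bit zero to mark a pointer to a kvm_rmap_desc chain once a second one appears, so the common single-mapping case needs no allocation. A userspace model of rmap_add (illustrative, with unchecked allocations that are never freed):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define RMAP_EXT 4

struct rmap_desc {
        uint64_t *sptes[RMAP_EXT];
        struct rmap_desc *more;
};

static void rmap_add(uintptr_t *priv, uint64_t *spte)
{
        struct rmap_desc *d;
        int i;

        if (!*priv) {                              /* 0 -> 1 */
                *priv = (uintptr_t)spte;
        } else if (!(*priv & 1)) {                 /* 1 -> many: spill to a desc */
                d = calloc(1, sizeof(*d));
                d->sptes[0] = (uint64_t *)*priv;
                d->sptes[1] = spte;
                *priv = (uintptr_t)d | 1;
        } else {                                   /* many -> many */
                d = (struct rmap_desc *)(*priv & ~(uintptr_t)1);
                while (d->sptes[RMAP_EXT - 1] && d->more)
                        d = d->more;
                if (d->sptes[RMAP_EXT - 1]) {
                        d->more = calloc(1, sizeof(*d));
                        d = d->more;
                }
                for (i = 0; d->sptes[i]; ++i)
                        ;
                d->sptes[i] = spte;
        }
}

int main(void)
{
        uintptr_t priv = 0;
        uint64_t a, b;

        rmap_add(&priv, &a);
        assert(priv == (uintptr_t)&a);             /* no descriptor yet */
        rmap_add(&priv, &b);
        assert(priv & 1);                          /* chained now */
        return 0;
}
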
@@ -183,8 +456,239 @@ static hpa_t kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, u64 *parent_pte)
183 ASSERT(is_empty_shadow_page(page->page_hpa)); 456 ASSERT(is_empty_shadow_page(page->page_hpa));
184 page->slot_bitmap = 0; 457 page->slot_bitmap = 0;
185 page->global = 1; 458 page->global = 1;
459 page->multimapped = 0;
186 page->parent_pte = parent_pte; 460 page->parent_pte = parent_pte;
187 return page->page_hpa; 461 --vcpu->kvm->n_free_mmu_pages;
462 return page;
463}
464
465static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
466 struct kvm_mmu_page *page, u64 *parent_pte)
467{
468 struct kvm_pte_chain *pte_chain;
469 struct hlist_node *node;
470 int i;
471
472 if (!parent_pte)
473 return;
474 if (!page->multimapped) {
475 u64 *old = page->parent_pte;
476
477 if (!old) {
478 page->parent_pte = parent_pte;
479 return;
480 }
481 page->multimapped = 1;
482 pte_chain = mmu_alloc_pte_chain(vcpu);
483 INIT_HLIST_HEAD(&page->parent_ptes);
484 hlist_add_head(&pte_chain->link, &page->parent_ptes);
485 pte_chain->parent_ptes[0] = old;
486 }
487 hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
488 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
489 continue;
490 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
491 if (!pte_chain->parent_ptes[i]) {
492 pte_chain->parent_ptes[i] = parent_pte;
493 return;
494 }
495 }
496 pte_chain = mmu_alloc_pte_chain(vcpu);
497 BUG_ON(!pte_chain);
498 hlist_add_head(&pte_chain->link, &page->parent_ptes);
499 pte_chain->parent_ptes[0] = parent_pte;
500}
501
502static void mmu_page_remove_parent_pte(struct kvm_vcpu *vcpu,
503 struct kvm_mmu_page *page,
504 u64 *parent_pte)
505{
506 struct kvm_pte_chain *pte_chain;
507 struct hlist_node *node;
508 int i;
509
510 if (!page->multimapped) {
511 BUG_ON(page->parent_pte != parent_pte);
512 page->parent_pte = NULL;
513 return;
514 }
515 hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
516 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
517 if (!pte_chain->parent_ptes[i])
518 break;
519 if (pte_chain->parent_ptes[i] != parent_pte)
520 continue;
521 while (i + 1 < NR_PTE_CHAIN_ENTRIES
522 && pte_chain->parent_ptes[i + 1]) {
523 pte_chain->parent_ptes[i]
524 = pte_chain->parent_ptes[i + 1];
525 ++i;
526 }
527 pte_chain->parent_ptes[i] = NULL;
528 if (i == 0) {
529 hlist_del(&pte_chain->link);
530 mmu_free_pte_chain(vcpu, pte_chain);
531 if (hlist_empty(&page->parent_ptes)) {
532 page->multimapped = 0;
533 page->parent_pte = NULL;
534 }
535 }
536 return;
537 }
538 BUG();
539}
540
541static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
542 gfn_t gfn)
543{
544 unsigned index;
545 struct hlist_head *bucket;
546 struct kvm_mmu_page *page;
547 struct hlist_node *node;
548
549 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
550 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
551 bucket = &vcpu->kvm->mmu_page_hash[index];
552 hlist_for_each_entry(page, node, bucket, hash_link)
553 if (page->gfn == gfn && !page->role.metaphysical) {
554 pgprintk("%s: found role %x\n",
555 __FUNCTION__, page->role.word);
556 return page;
557 }
558 return NULL;
559}
560
561static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
562 gfn_t gfn,
563 gva_t gaddr,
564 unsigned level,
565 int metaphysical,
566 u64 *parent_pte)
567{
568 union kvm_mmu_page_role role;
569 unsigned index;
570 unsigned quadrant;
571 struct hlist_head *bucket;
572 struct kvm_mmu_page *page;
573 struct hlist_node *node;
574
575 role.word = 0;
576 role.glevels = vcpu->mmu.root_level;
577 role.level = level;
578 role.metaphysical = metaphysical;
579 if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
580 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
581 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
582 role.quadrant = quadrant;
583 }
584 pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
585 gfn, role.word);
586 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
587 bucket = &vcpu->kvm->mmu_page_hash[index];
588 hlist_for_each_entry(page, node, bucket, hash_link)
589 if (page->gfn == gfn && page->role.word == role.word) {
590 mmu_page_add_parent_pte(vcpu, page, parent_pte);
591 pgprintk("%s: found\n", __FUNCTION__);
592 return page;
593 }
594 page = kvm_mmu_alloc_page(vcpu, parent_pte);
595 if (!page)
596 return page;
597 pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
598 page->gfn = gfn;
599 page->role = role;
600 hlist_add_head(&page->hash_link, bucket);
601 if (!metaphysical)
602 rmap_write_protect(vcpu, gfn);
603 return page;
604}
605
606static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
607 struct kvm_mmu_page *page)
608{
609 unsigned i;
610 u64 *pt;
611 u64 ent;
612
613 pt = __va(page->page_hpa);
614
615 if (page->role.level == PT_PAGE_TABLE_LEVEL) {
616 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
617 if (pt[i] & PT_PRESENT_MASK)
618 rmap_remove(vcpu, &pt[i]);
619 pt[i] = 0;
620 }
621 kvm_arch_ops->tlb_flush(vcpu);
622 return;
623 }
624
625 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
626 ent = pt[i];
627
628 pt[i] = 0;
629 if (!(ent & PT_PRESENT_MASK))
630 continue;
631 ent &= PT64_BASE_ADDR_MASK;
632 mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
633 }
634}
635
636static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
637 struct kvm_mmu_page *page,
638 u64 *parent_pte)
639{
640 mmu_page_remove_parent_pte(vcpu, page, parent_pte);
641}
642
643static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
644 struct kvm_mmu_page *page)
645{
646 u64 *parent_pte;
647
648 while (page->multimapped || page->parent_pte) {
649 if (!page->multimapped)
650 parent_pte = page->parent_pte;
651 else {
652 struct kvm_pte_chain *chain;
653
654 chain = container_of(page->parent_ptes.first,
655 struct kvm_pte_chain, link);
656 parent_pte = chain->parent_ptes[0];
657 }
658 BUG_ON(!parent_pte);
659 kvm_mmu_put_page(vcpu, page, parent_pte);
660 *parent_pte = 0;
661 }
662 kvm_mmu_page_unlink_children(vcpu, page);
663 if (!page->root_count) {
664 hlist_del(&page->hash_link);
665 kvm_mmu_free_page(vcpu, page->page_hpa);
666 } else {
667 list_del(&page->link);
668 list_add(&page->link, &vcpu->kvm->active_mmu_pages);
669 }
670}
671
672static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
673{
674 unsigned index;
675 struct hlist_head *bucket;
676 struct kvm_mmu_page *page;
677 struct hlist_node *node, *n;
678 int r;
679
680 pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
681 r = 0;
682 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
683 bucket = &vcpu->kvm->mmu_page_hash[index];
684 hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
685 if (page->gfn == gfn && !page->role.metaphysical) {
686 pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
687 page->role.word);
688 kvm_mmu_zap_page(vcpu, page);
689 r = 1;
690 }
691 return r;
188} 692}
189 693
190static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa) 694static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
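
kvm_mmu_get_page derives a quadrant for 2-level guests because guest and shadow tables fan out differently: a 32-bit guest leaf table has 1024 entries covering 4MB while each 512-entry shadow table covers 2MB, so one guest table needs two shadow pages, and the 4GB guest page directory needs four shadow roots. A standalone check of the quadrant formula from the hunk above:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PT64_PT_BITS 9
#define PT32_PT_BITS 10

static unsigned quadrant(uint64_t gaddr, unsigned level)
{
        unsigned q = gaddr >> (PAGE_SHIFT + PT64_PT_BITS * level);

        return q & ((1u << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1);
}

int main(void)
{
        assert(quadrant(0x00000000, 1) == 0);      /* first 2MB half of a 4MB table */
        assert(quadrant(0x00200000, 1) == 1);      /* second 2MB half */
        assert(quadrant(0x00000000, 2) == 0);      /* first 1GB of the 4GB pgd */
        assert(quadrant(0xc0000000u, 2) == 3);     /* top 1GB quadrant */
        return 0;
}
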
@@ -225,35 +729,6 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
225 return gpa_to_hpa(vcpu, gpa); 729 return gpa_to_hpa(vcpu, gpa);
226} 730}
227 731
228
229static void release_pt_page_64(struct kvm_vcpu *vcpu, hpa_t page_hpa,
230 int level)
231{
232 ASSERT(vcpu);
233 ASSERT(VALID_PAGE(page_hpa));
234 ASSERT(level <= PT64_ROOT_LEVEL && level > 0);
235
236 if (level == 1)
237 memset(__va(page_hpa), 0, PAGE_SIZE);
238 else {
239 u64 *pos;
240 u64 *end;
241
242 for (pos = __va(page_hpa), end = pos + PT64_ENT_PER_PAGE;
243 pos != end; pos++) {
244 u64 current_ent = *pos;
245
246 *pos = 0;
247 if (is_present_pte(current_ent))
248 release_pt_page_64(vcpu,
249 current_ent &
250 PT64_BASE_ADDR_MASK,
251 level - 1);
252 }
253 }
254 kvm_mmu_free_page(vcpu, page_hpa);
255}
256
257static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) 732static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
258{ 733{
259} 734}
@@ -266,52 +741,109 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
266 for (; ; level--) { 741 for (; ; level--) {
267 u32 index = PT64_INDEX(v, level); 742 u32 index = PT64_INDEX(v, level);
268 u64 *table; 743 u64 *table;
744 u64 pte;
269 745
270 ASSERT(VALID_PAGE(table_addr)); 746 ASSERT(VALID_PAGE(table_addr));
271 table = __va(table_addr); 747 table = __va(table_addr);
272 748
273 if (level == 1) { 749 if (level == 1) {
750 pte = table[index];
751 if (is_present_pte(pte) && is_writeble_pte(pte))
752 return 0;
274 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT); 753 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
275 page_header_update_slot(vcpu->kvm, table, v); 754 page_header_update_slot(vcpu->kvm, table, v);
276 table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK | 755 table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
277 PT_USER_MASK; 756 PT_USER_MASK;
757 rmap_add(vcpu, &table[index]);
278 return 0; 758 return 0;
279 } 759 }
280 760
281 if (table[index] == 0) { 761 if (table[index] == 0) {
282 hpa_t new_table = kvm_mmu_alloc_page(vcpu, 762 struct kvm_mmu_page *new_table;
283 &table[index]); 763 gfn_t pseudo_gfn;
284 764
285 if (!VALID_PAGE(new_table)) { 765 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
766 >> PAGE_SHIFT;
767 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
768 v, level - 1,
769 1, &table[index]);
770 if (!new_table) {
286 pgprintk("nonpaging_map: ENOMEM\n"); 771 pgprintk("nonpaging_map: ENOMEM\n");
287 return -ENOMEM; 772 return -ENOMEM;
288 } 773 }
289 774
290 if (level == PT32E_ROOT_LEVEL) 775 table[index] = new_table->page_hpa | PT_PRESENT_MASK
291 table[index] = new_table | PT_PRESENT_MASK; 776 | PT_WRITABLE_MASK | PT_USER_MASK;
292 else
293 table[index] = new_table | PT_PRESENT_MASK |
294 PT_WRITABLE_MASK | PT_USER_MASK;
295 } 777 }
296 table_addr = table[index] & PT64_BASE_ADDR_MASK; 778 table_addr = table[index] & PT64_BASE_ADDR_MASK;
297 } 779 }
298} 780}
299 781
300static void nonpaging_flush(struct kvm_vcpu *vcpu) 782static void mmu_free_roots(struct kvm_vcpu *vcpu)
301{ 783{
302 hpa_t root = vcpu->mmu.root_hpa; 784 int i;
785 struct kvm_mmu_page *page;
303 786
304 ++kvm_stat.tlb_flush; 787#ifdef CONFIG_X86_64
305 pgprintk("nonpaging_flush\n"); 788 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
306 ASSERT(VALID_PAGE(root)); 789 hpa_t root = vcpu->mmu.root_hpa;
307 release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level); 790
308 root = kvm_mmu_alloc_page(vcpu, NULL); 791 ASSERT(VALID_PAGE(root));
309 ASSERT(VALID_PAGE(root)); 792 page = page_header(root);
310 vcpu->mmu.root_hpa = root; 793 --page->root_count;
311 if (is_paging(vcpu)) 794 vcpu->mmu.root_hpa = INVALID_PAGE;
312 root |= (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)); 795 return;
313 kvm_arch_ops->set_cr3(vcpu, root); 796 }
314 kvm_arch_ops->tlb_flush(vcpu); 797#endif
798 for (i = 0; i < 4; ++i) {
799 hpa_t root = vcpu->mmu.pae_root[i];
800
801 ASSERT(VALID_PAGE(root));
802 root &= PT64_BASE_ADDR_MASK;
803 page = page_header(root);
804 --page->root_count;
805 vcpu->mmu.pae_root[i] = INVALID_PAGE;
806 }
807 vcpu->mmu.root_hpa = INVALID_PAGE;
808}
809
810static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
811{
812 int i;
813 gfn_t root_gfn;
814 struct kvm_mmu_page *page;
815
816 root_gfn = vcpu->cr3 >> PAGE_SHIFT;
817
818#ifdef CONFIG_X86_64
819 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
820 hpa_t root = vcpu->mmu.root_hpa;
821
822 ASSERT(!VALID_PAGE(root));
823 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
824 PT64_ROOT_LEVEL, 0, NULL);
825 root = page->page_hpa;
826 ++page->root_count;
827 vcpu->mmu.root_hpa = root;
828 return;
829 }
830#endif
831 for (i = 0; i < 4; ++i) {
832 hpa_t root = vcpu->mmu.pae_root[i];
833
834 ASSERT(!VALID_PAGE(root));
835 if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL)
836 root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
837 else if (vcpu->mmu.root_level == 0)
838 root_gfn = 0;
839 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
840 PT32_ROOT_LEVEL, !is_paging(vcpu),
841 NULL);
842 root = page->page_hpa;
843 ++page->root_count;
844 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
845 }
846 vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
315} 847}
316 848
317static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) 849static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -322,43 +854,29 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
322static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, 854static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
323 u32 error_code) 855 u32 error_code)
324{ 856{
325 int ret;
326 gpa_t addr = gva; 857 gpa_t addr = gva;
858 hpa_t paddr;
859 int r;
860
861 r = mmu_topup_memory_caches(vcpu);
862 if (r)
863 return r;
327 864
328 ASSERT(vcpu); 865 ASSERT(vcpu);
329 ASSERT(VALID_PAGE(vcpu->mmu.root_hpa)); 866 ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
330 867
331 for (;;) {
332 hpa_t paddr;
333
334 paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
335 868
336 if (is_error_hpa(paddr)) 869 paddr = gpa_to_hpa(vcpu , addr & PT64_BASE_ADDR_MASK);
337 return 1;
338 870
339 ret = nonpaging_map(vcpu, addr & PAGE_MASK, paddr); 871 if (is_error_hpa(paddr))
340 if (ret) { 872 return 1;
341 nonpaging_flush(vcpu);
342 continue;
343 }
344 break;
345 }
346 return ret;
347}
348 873
349static void nonpaging_inval_page(struct kvm_vcpu *vcpu, gva_t addr) 874 return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
350{
351} 875}
352 876
353static void nonpaging_free(struct kvm_vcpu *vcpu) 877static void nonpaging_free(struct kvm_vcpu *vcpu)
354{ 878{
355 hpa_t root; 879 mmu_free_roots(vcpu);
356
357 ASSERT(vcpu);
358 root = vcpu->mmu.root_hpa;
359 if (VALID_PAGE(root))
360 release_pt_page_64(vcpu, root, vcpu->mmu.shadow_root_level);
361 vcpu->mmu.root_hpa = INVALID_PAGE;
362} 880}
363 881
364static int nonpaging_init_context(struct kvm_vcpu *vcpu) 882static int nonpaging_init_context(struct kvm_vcpu *vcpu)
@@ -367,40 +885,31 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
367 885
368 context->new_cr3 = nonpaging_new_cr3; 886 context->new_cr3 = nonpaging_new_cr3;
369 context->page_fault = nonpaging_page_fault; 887 context->page_fault = nonpaging_page_fault;
370 context->inval_page = nonpaging_inval_page;
371 context->gva_to_gpa = nonpaging_gva_to_gpa; 888 context->gva_to_gpa = nonpaging_gva_to_gpa;
372 context->free = nonpaging_free; 889 context->free = nonpaging_free;
373 context->root_level = PT32E_ROOT_LEVEL; 890 context->root_level = 0;
374 context->shadow_root_level = PT32E_ROOT_LEVEL; 891 context->shadow_root_level = PT32E_ROOT_LEVEL;
375 context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); 892 mmu_alloc_roots(vcpu);
376 ASSERT(VALID_PAGE(context->root_hpa)); 893 ASSERT(VALID_PAGE(context->root_hpa));
377 kvm_arch_ops->set_cr3(vcpu, context->root_hpa); 894 kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
378 return 0; 895 return 0;
379} 896}
380 897
381
382static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) 898static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
383{ 899{
384 struct kvm_mmu_page *page, *npage;
385
386 list_for_each_entry_safe(page, npage, &vcpu->kvm->active_mmu_pages,
387 link) {
388 if (page->global)
389 continue;
390
391 if (!page->parent_pte)
392 continue;
393
394 *page->parent_pte = 0;
395 release_pt_page_64(vcpu, page->page_hpa, 1);
396 }
397 ++kvm_stat.tlb_flush; 900 ++kvm_stat.tlb_flush;
398 kvm_arch_ops->tlb_flush(vcpu); 901 kvm_arch_ops->tlb_flush(vcpu);
399} 902}
400 903
401static void paging_new_cr3(struct kvm_vcpu *vcpu) 904static void paging_new_cr3(struct kvm_vcpu *vcpu)
402{ 905{
906 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
907 mmu_free_roots(vcpu);
908 if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
909 kvm_mmu_free_some_pages(vcpu);
910 mmu_alloc_roots(vcpu);
403 kvm_mmu_flush_tlb(vcpu); 911 kvm_mmu_flush_tlb(vcpu);
912 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
404} 913}
405 914
406static void mark_pagetable_nonglobal(void *shadow_pte) 915static void mark_pagetable_nonglobal(void *shadow_pte)
@@ -412,7 +921,8 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
412 u64 *shadow_pte, 921 u64 *shadow_pte,
413 gpa_t gaddr, 922 gpa_t gaddr,
414 int dirty, 923 int dirty,
415 u64 access_bits) 924 u64 access_bits,
925 gfn_t gfn)
416{ 926{
417 hpa_t paddr; 927 hpa_t paddr;
418 928
@@ -420,13 +930,10 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
420 if (!dirty) 930 if (!dirty)
421 access_bits &= ~PT_WRITABLE_MASK; 931 access_bits &= ~PT_WRITABLE_MASK;
422 932
423 if (access_bits & PT_WRITABLE_MASK) 933 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
424 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
425 934
426 *shadow_pte |= access_bits; 935 *shadow_pte |= access_bits;
427 936
428 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
429
430 if (!(*shadow_pte & PT_GLOBAL_MASK)) 937 if (!(*shadow_pte & PT_GLOBAL_MASK))
431 mark_pagetable_nonglobal(shadow_pte); 938 mark_pagetable_nonglobal(shadow_pte);
432 939
@@ -434,10 +941,31 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
434 *shadow_pte |= gaddr; 941 *shadow_pte |= gaddr;
435 *shadow_pte |= PT_SHADOW_IO_MARK; 942 *shadow_pte |= PT_SHADOW_IO_MARK;
436 *shadow_pte &= ~PT_PRESENT_MASK; 943 *shadow_pte &= ~PT_PRESENT_MASK;
437 } else { 944 return;
438 *shadow_pte |= paddr; 945 }
439 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr); 946
947 *shadow_pte |= paddr;
948
949 if (access_bits & PT_WRITABLE_MASK) {
950 struct kvm_mmu_page *shadow;
951
952 shadow = kvm_mmu_lookup_page(vcpu, gfn);
953 if (shadow) {
954 pgprintk("%s: found shadow page for %lx, marking ro\n",
955 __FUNCTION__, gfn);
956 access_bits &= ~PT_WRITABLE_MASK;
957 if (is_writeble_pte(*shadow_pte)) {
958 *shadow_pte &= ~PT_WRITABLE_MASK;
959 kvm_arch_ops->tlb_flush(vcpu);
960 }
961 }
440 } 962 }
963
964 if (access_bits & PT_WRITABLE_MASK)
965 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
966
967 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
968 rmap_add(vcpu, shadow_pte);
441} 969}
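
The tail of set_pte_common() above enforces one invariant: a writable mapping is downgraded when its target gfn is itself shadowed as a page table, so every guest pte update traps into the mmu. A minimal sketch of that rule, using only helpers named in this patch (the function itself is illustrative, not part of the diff):

static void protect_shadowed_gfn(struct kvm_vcpu *vcpu, u64 *shadow_pte,
				 u64 *access_bits, gfn_t gfn)
{
	if (!(*access_bits & PT_WRITABLE_MASK))
		return;
	/* A gfn that is shadowed as a page table must stay read-only,
	 * so writes to it are always intercepted. */
	if (kvm_mmu_lookup_page(vcpu, gfn)) {
		*access_bits &= ~PT_WRITABLE_MASK;
		if (is_writeble_pte(*shadow_pte)) {
			*shadow_pte &= ~PT_WRITABLE_MASK;
			kvm_arch_ops->tlb_flush(vcpu);
		}
	}
}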
442 970
443static void inject_page_fault(struct kvm_vcpu *vcpu, 971static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -474,41 +1002,6 @@ static int may_access(u64 pte, int write, int user)
474 return 1; 1002 return 1;
475} 1003}
476 1004
477/*
478 * Remove a shadow pte.
479 */
480static void paging_inval_page(struct kvm_vcpu *vcpu, gva_t addr)
481{
482 hpa_t page_addr = vcpu->mmu.root_hpa;
483 int level = vcpu->mmu.shadow_root_level;
484
485 ++kvm_stat.invlpg;
486
487 for (; ; level--) {
488 u32 index = PT64_INDEX(addr, level);
489 u64 *table = __va(page_addr);
490
491 if (level == PT_PAGE_TABLE_LEVEL) {
492 table[index] = 0;
493 return;
494 }
495
496 if (!is_present_pte(table[index]))
497 return;
498
499 page_addr = table[index] & PT64_BASE_ADDR_MASK;
500
501 if (level == PT_DIRECTORY_LEVEL &&
502 (table[index] & PT_SHADOW_PS_MARK)) {
503 table[index] = 0;
504 release_pt_page_64(vcpu, page_addr, PT_PAGE_TABLE_LEVEL);
505
506 kvm_arch_ops->tlb_flush(vcpu);
507 return;
508 }
509 }
510}
511
512static void paging_free(struct kvm_vcpu *vcpu) 1005static void paging_free(struct kvm_vcpu *vcpu)
513{ 1006{
514 nonpaging_free(vcpu); 1007 nonpaging_free(vcpu);
@@ -522,37 +1015,40 @@ static void paging_free(struct kvm_vcpu *vcpu)
522#include "paging_tmpl.h" 1015#include "paging_tmpl.h"
523#undef PTTYPE 1016#undef PTTYPE
524 1017
525static int paging64_init_context(struct kvm_vcpu *vcpu) 1018static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
526{ 1019{
527 struct kvm_mmu *context = &vcpu->mmu; 1020 struct kvm_mmu *context = &vcpu->mmu;
528 1021
529 ASSERT(is_pae(vcpu)); 1022 ASSERT(is_pae(vcpu));
530 context->new_cr3 = paging_new_cr3; 1023 context->new_cr3 = paging_new_cr3;
531 context->page_fault = paging64_page_fault; 1024 context->page_fault = paging64_page_fault;
532 context->inval_page = paging_inval_page;
533 context->gva_to_gpa = paging64_gva_to_gpa; 1025 context->gva_to_gpa = paging64_gva_to_gpa;
534 context->free = paging_free; 1026 context->free = paging_free;
535 context->root_level = PT64_ROOT_LEVEL; 1027 context->root_level = level;
536 context->shadow_root_level = PT64_ROOT_LEVEL; 1028 context->shadow_root_level = level;
537 context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); 1029 mmu_alloc_roots(vcpu);
538 ASSERT(VALID_PAGE(context->root_hpa)); 1030 ASSERT(VALID_PAGE(context->root_hpa));
539 kvm_arch_ops->set_cr3(vcpu, context->root_hpa | 1031 kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
540 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); 1032 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
541 return 0; 1033 return 0;
542} 1034}
543 1035
1036static int paging64_init_context(struct kvm_vcpu *vcpu)
1037{
1038 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1039}
1040
544static int paging32_init_context(struct kvm_vcpu *vcpu) 1041static int paging32_init_context(struct kvm_vcpu *vcpu)
545{ 1042{
546 struct kvm_mmu *context = &vcpu->mmu; 1043 struct kvm_mmu *context = &vcpu->mmu;
547 1044
548 context->new_cr3 = paging_new_cr3; 1045 context->new_cr3 = paging_new_cr3;
549 context->page_fault = paging32_page_fault; 1046 context->page_fault = paging32_page_fault;
550 context->inval_page = paging_inval_page;
551 context->gva_to_gpa = paging32_gva_to_gpa; 1047 context->gva_to_gpa = paging32_gva_to_gpa;
552 context->free = paging_free; 1048 context->free = paging_free;
553 context->root_level = PT32_ROOT_LEVEL; 1049 context->root_level = PT32_ROOT_LEVEL;
554 context->shadow_root_level = PT32E_ROOT_LEVEL; 1050 context->shadow_root_level = PT32E_ROOT_LEVEL;
555 context->root_hpa = kvm_mmu_alloc_page(vcpu, NULL); 1051 mmu_alloc_roots(vcpu);
556 ASSERT(VALID_PAGE(context->root_hpa)); 1052 ASSERT(VALID_PAGE(context->root_hpa));
557 kvm_arch_ops->set_cr3(vcpu, context->root_hpa | 1053 kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
558 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK))); 1054 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
@@ -561,14 +1057,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
561 1057
562static int paging32E_init_context(struct kvm_vcpu *vcpu) 1058static int paging32E_init_context(struct kvm_vcpu *vcpu)
563{ 1059{
564 int ret; 1060 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
565
566 if ((ret = paging64_init_context(vcpu)))
567 return ret;
568
569 vcpu->mmu.root_level = PT32E_ROOT_LEVEL;
570 vcpu->mmu.shadow_root_level = PT32E_ROOT_LEVEL;
571 return 0;
572} 1061}
573 1062
574static int init_kvm_mmu(struct kvm_vcpu *vcpu) 1063static int init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -597,41 +1086,161 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
597 1086
598int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) 1087int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
599{ 1088{
1089 int r;
1090
600 destroy_kvm_mmu(vcpu); 1091 destroy_kvm_mmu(vcpu);
601 return init_kvm_mmu(vcpu); 1092 r = init_kvm_mmu(vcpu);
1093 if (r < 0)
1094 goto out;
1095 r = mmu_topup_memory_caches(vcpu);
1096out:
1097 return r;
602} 1098}
603 1099
604static void free_mmu_pages(struct kvm_vcpu *vcpu) 1100void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
605{ 1101{
606 while (!list_empty(&vcpu->free_pages)) { 1102 gfn_t gfn = gpa >> PAGE_SHIFT;
1103 struct kvm_mmu_page *page;
1104 struct kvm_mmu_page *child;
1105 struct hlist_node *node, *n;
1106 struct hlist_head *bucket;
1107 unsigned index;
1108 u64 *spte;
1109 u64 pte;
1110 unsigned offset = offset_in_page(gpa);
1111 unsigned pte_size;
1112 unsigned page_offset;
1113 unsigned misaligned;
1114 int level;
1115 int flooded = 0;
1116
1117 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1118 if (gfn == vcpu->last_pt_write_gfn) {
1119 ++vcpu->last_pt_write_count;
1120 if (vcpu->last_pt_write_count >= 3)
1121 flooded = 1;
1122 } else {
1123 vcpu->last_pt_write_gfn = gfn;
1124 vcpu->last_pt_write_count = 1;
1125 }
1126 index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
1127 bucket = &vcpu->kvm->mmu_page_hash[index];
1128 hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
1129 if (page->gfn != gfn || page->role.metaphysical)
1130 continue;
1131 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1132 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1133 if (misaligned || flooded) {
1134 /*
1135 * Misaligned accesses are too much trouble to fix
1136 * up; also, they usually indicate a page is not used
1137 * as a page table.
1138 *
1139 * If we're seeing too many writes to a page,
1140 * it may no longer be a page table, or we may be
1141 * forking, in which case it is better to unmap the
1142 * page.
1143 */
1144 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1145 gpa, bytes, page->role.word);
1146 kvm_mmu_zap_page(vcpu, page);
1147 continue;
1148 }
1149 page_offset = offset;
1150 level = page->role.level;
1151 if (page->role.glevels == PT32_ROOT_LEVEL) {
1152 page_offset <<= 1; /* 32->64 */
1153 page_offset &= ~PAGE_MASK;
1154 }
1155 spte = __va(page->page_hpa);
1156 spte += page_offset / sizeof(*spte);
1157 pte = *spte;
1158 if (is_present_pte(pte)) {
1159 if (level == PT_PAGE_TABLE_LEVEL)
1160 rmap_remove(vcpu, spte);
1161 else {
1162 child = page_header(pte & PT64_BASE_ADDR_MASK);
1163 mmu_page_remove_parent_pte(vcpu, child, spte);
1164 }
1165 }
1166 *spte = 0;
1167 }
1168}
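
kvm_mmu_pre_write() above zaps a shadow page on either of two signals: a write flood (last_pt_write_count reaching 3 on the same gfn) or a misaligned write. A sketch of the misalignment test with worked values (illustrative, not part of the diff):

static int pte_write_misaligned(unsigned offset, unsigned bytes,
				unsigned pte_size)
{
	/* Nonzero iff the first and last byte written land in
	 * different pte_size-aligned cells.
	 * E.g. pte_size 8, offset 6, bytes 4:
	 *     (6 ^ 9) & ~7 = 15 & ~7 = 8  -> misaligned
	 * pte_size 8, offset 8, bytes 8:
	 *     (8 ^ 15) & ~7 = 7 & ~7 = 0  -> exactly one pte */
	return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}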
1169
1170void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1171{
1172}
1173
1174int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1175{
1176 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
1177
1178 return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
1179}
1180
1181void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1182{
1183 while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
607 struct kvm_mmu_page *page; 1184 struct kvm_mmu_page *page;
608 1185
1186 page = container_of(vcpu->kvm->active_mmu_pages.prev,
1187 struct kvm_mmu_page, link);
1188 kvm_mmu_zap_page(vcpu, page);
1189 }
1190}
1191EXPORT_SYMBOL_GPL(kvm_mmu_free_some_pages);
1192
1193static void free_mmu_pages(struct kvm_vcpu *vcpu)
1194{
1195 struct kvm_mmu_page *page;
1196
1197 while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
1198 page = container_of(vcpu->kvm->active_mmu_pages.next,
1199 struct kvm_mmu_page, link);
1200 kvm_mmu_zap_page(vcpu, page);
1201 }
1202 while (!list_empty(&vcpu->free_pages)) {
609 page = list_entry(vcpu->free_pages.next, 1203 page = list_entry(vcpu->free_pages.next,
610 struct kvm_mmu_page, link); 1204 struct kvm_mmu_page, link);
611 list_del(&page->link); 1205 list_del(&page->link);
612 __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT)); 1206 __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
613 page->page_hpa = INVALID_PAGE; 1207 page->page_hpa = INVALID_PAGE;
614 } 1208 }
1209 free_page((unsigned long)vcpu->mmu.pae_root);
615} 1210}
616 1211
617static int alloc_mmu_pages(struct kvm_vcpu *vcpu) 1212static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
618{ 1213{
1214 struct page *page;
619 int i; 1215 int i;
620 1216
621 ASSERT(vcpu); 1217 ASSERT(vcpu);
622 1218
623 for (i = 0; i < KVM_NUM_MMU_PAGES; i++) { 1219 for (i = 0; i < KVM_NUM_MMU_PAGES; i++) {
624 struct page *page;
625 struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i]; 1220 struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
626 1221
627 INIT_LIST_HEAD(&page_header->link); 1222 INIT_LIST_HEAD(&page_header->link);
628 if ((page = alloc_page(GFP_KVM_MMU)) == NULL) 1223 if ((page = alloc_page(GFP_KERNEL)) == NULL)
629 goto error_1; 1224 goto error_1;
630 page->private = (unsigned long)page_header; 1225 page->private = (unsigned long)page_header;
631 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; 1226 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
632 memset(__va(page_header->page_hpa), 0, PAGE_SIZE); 1227 memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
633 list_add(&page_header->link, &vcpu->free_pages); 1228 list_add(&page_header->link, &vcpu->free_pages);
1229 ++vcpu->kvm->n_free_mmu_pages;
634 } 1230 }
1231
1232 /*
1233 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1234 * Therefore we need to allocate shadow page tables in the first
1235 * 4GB of memory, which happens to fit the DMA32 zone.
1236 */
1237 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1238 if (!page)
1239 goto error_1;
1240 vcpu->mmu.pae_root = page_address(page);
1241 for (i = 0; i < 4; ++i)
1242 vcpu->mmu.pae_root[i] = INVALID_PAGE;
1243
635 return 0; 1244 return 0;
636 1245
637error_1: 1246error_1:
@@ -663,10 +1272,12 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
663 1272
664 destroy_kvm_mmu(vcpu); 1273 destroy_kvm_mmu(vcpu);
665 free_mmu_pages(vcpu); 1274 free_mmu_pages(vcpu);
1275 mmu_free_memory_caches(vcpu);
666} 1276}
667 1277
668void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) 1278void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
669{ 1279{
1280 struct kvm *kvm = vcpu->kvm;
670 struct kvm_mmu_page *page; 1281 struct kvm_mmu_page *page;
671 1282
672 list_for_each_entry(page, &kvm->active_mmu_pages, link) { 1283 list_for_each_entry(page, &kvm->active_mmu_pages, link) {
@@ -679,8 +1290,169 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
679 pt = __va(page->page_hpa); 1290 pt = __va(page->page_hpa);
680 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 1291 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
681 /* avoid RMW */ 1292 /* avoid RMW */
682 if (pt[i] & PT_WRITABLE_MASK) 1293 if (pt[i] & PT_WRITABLE_MASK) {
1294 rmap_remove(vcpu, &pt[i]);
683 pt[i] &= ~PT_WRITABLE_MASK; 1295 pt[i] &= ~PT_WRITABLE_MASK;
1296 }
1297 }
1298}
1299
1300#ifdef AUDIT
1301
1302static const char *audit_msg;
1303
1304static gva_t canonicalize(gva_t gva)
1305{
1306#ifdef CONFIG_X86_64
1307 gva = (long long)(gva << 16) >> 16;
1308#endif
1309 return gva;
1310}
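
A worked example of the sign extension canonicalize() performs, assuming the usual x86_64 48-bit canonical-address rule:

/*
 * gva                     = 0x0000800000000000  (bit 47 set)
 * gva << 16               = 0x8000000000000000
 * (long long)(...) >> 16  = 0xffff800000000000  (sign-extended)
 *
 * Addresses with bit 47 clear pass through unchanged; on 32-bit
 * hosts canonicalize() is the identity.
 */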
684 1311
1312static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
1313 gva_t va, int level)
1314{
1315 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
1316 int i;
1317 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
1318
1319 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
1320 u64 ent = pt[i];
1321
1322 if (!(ent & PT_PRESENT_MASK))
1323 continue;
1324
1325 va = canonicalize(va);
1326 if (level > 1)
1327 audit_mappings_page(vcpu, ent, va, level - 1);
1328 else {
1329 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
1330 hpa_t hpa = gpa_to_hpa(vcpu, gpa);
1331
1332 if ((ent & PT_PRESENT_MASK)
1333 && (ent & PT64_BASE_ADDR_MASK) != hpa)
1334 printk(KERN_ERR "audit error: (%s) levels %d"
1335 " gva %lx gpa %llx hpa %llx ent %llx\n",
1336 audit_msg, vcpu->mmu.root_level,
1337 va, gpa, hpa, ent);
1338 }
685 } 1339 }
686} 1340}
1341
1342static void audit_mappings(struct kvm_vcpu *vcpu)
1343{
1344 int i;
1345
1346 if (vcpu->mmu.root_level == 4)
1347 audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
1348 else
1349 for (i = 0; i < 4; ++i)
1350 if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
1351 audit_mappings_page(vcpu,
1352 vcpu->mmu.pae_root[i],
1353 i << 30,
1354 2);
1355}
1356
1357static int count_rmaps(struct kvm_vcpu *vcpu)
1358{
1359 int nmaps = 0;
1360 int i, j, k;
1361
1362 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1363 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
1364 struct kvm_rmap_desc *d;
1365
1366 for (j = 0; j < m->npages; ++j) {
1367 struct page *page = m->phys_mem[j];
1368
1369 if (!page->private)
1370 continue;
1371 if (!(page->private & 1)) {
1372 ++nmaps;
1373 continue;
1374 }
1375 d = (struct kvm_rmap_desc *)(page->private & ~1ul);
1376 while (d) {
1377 for (k = 0; k < RMAP_EXT; ++k)
1378 if (d->shadow_ptes[k])
1379 ++nmaps;
1380 else
1381 break;
1382 d = d->more;
1383 }
1384 }
1385 }
1386 return nmaps;
1387}
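
count_rmaps() decodes the page->private encoding used by the reverse map; a sketch of that encoding, on the assumption that it matches what rmap_add() stores:

/*
 * 0          no shadow ptes map this page
 * spte       bit 0 clear: address of the single mapping spte
 * desc | 1   bit 0 set: pointer to a kvm_rmap_desc chain, each desc
 *            holding up to RMAP_EXT sptes in shadow_ptes[], linked
 *            through ->more
 */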
1388
1389static int count_writable_mappings(struct kvm_vcpu *vcpu)
1390{
1391 int nmaps = 0;
1392 struct kvm_mmu_page *page;
1393 int i;
1394
1395 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1396 u64 *pt = __va(page->page_hpa);
1397
1398 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1399 continue;
1400
1401 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1402 u64 ent = pt[i];
1403
1404 if (!(ent & PT_PRESENT_MASK))
1405 continue;
1406 if (!(ent & PT_WRITABLE_MASK))
1407 continue;
1408 ++nmaps;
1409 }
1410 }
1411 return nmaps;
1412}
1413
1414static void audit_rmap(struct kvm_vcpu *vcpu)
1415{
1416 int n_rmap = count_rmaps(vcpu);
1417 int n_actual = count_writable_mappings(vcpu);
1418
1419 if (n_rmap != n_actual)
1420 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1421 __FUNCTION__, audit_msg, n_rmap, n_actual);
1422}
1423
1424static void audit_write_protection(struct kvm_vcpu *vcpu)
1425{
1426 struct kvm_mmu_page *page;
1427
1428 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1429 hfn_t hfn;
1430 struct page *pg;
1431
1432 if (page->role.metaphysical)
1433 continue;
1434
1435 hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
1436 >> PAGE_SHIFT;
1437 pg = pfn_to_page(hfn);
1438 if (pg->private)
1439 printk(KERN_ERR "%s: (%s) shadow page has writable"
1440 " mappings: gfn %lx role %x\n",
1441 __FUNCTION__, audit_msg, page->gfn,
1442 page->role.word);
1443 }
1444}
1445
1446static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1447{
1448 int olddbg = dbg;
1449
1450 dbg = 0;
1451 audit_msg = msg;
1452 audit_rmap(vcpu);
1453 audit_write_protection(vcpu);
1454 audit_mappings(vcpu);
1455 dbg = olddbg;
1456}
1457
1458#endif
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 09bb9b4ed12d..2dbf4307ed9e 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -32,6 +32,11 @@
32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK 34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
35 #ifdef CONFIG_X86_64
36 #define PT_MAX_FULL_LEVELS 4
37 #else
38 #define PT_MAX_FULL_LEVELS 2
39 #endif
35#elif PTTYPE == 32 40#elif PTTYPE == 32
36 #define pt_element_t u32 41 #define pt_element_t u32
37 #define guest_walker guest_walker32 42 #define guest_walker guest_walker32
@@ -42,6 +47,7 @@
42 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 47 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
43 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 48 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
44 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK 49 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
50 #define PT_MAX_FULL_LEVELS 2
45#else 51#else
46 #error Invalid PTTYPE value 52 #error Invalid PTTYPE value
47#endif 53#endif
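
PT_MAX_FULL_LEVELS sizes the walker's table_gfn[] array introduced below; a sketch of the values it takes, derived from the definitions above:

/*
 * PTTYPE 64, CONFIG_X86_64:  4  (full 4-level long-mode walk)
 * PTTYPE 64, 32-bit host:    2  (the PAE PDPT level is consumed
 *                                before the main loop; see walk_addr)
 * PTTYPE 32:                 2  (classic two-level paging)
 *
 * It bounds guest_walker.table_gfn[], one recorded guest frame per
 * fully-walked level.
 */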
@@ -52,93 +58,126 @@
52 */ 58 */
53struct guest_walker { 59struct guest_walker {
54 int level; 60 int level;
61 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
55 pt_element_t *table; 62 pt_element_t *table;
63 pt_element_t *ptep;
56 pt_element_t inherited_ar; 64 pt_element_t inherited_ar;
65 gfn_t gfn;
57}; 66};
58 67
59static void FNAME(init_walker)(struct guest_walker *walker, 68/*
60 struct kvm_vcpu *vcpu) 69 * Fetch a guest pte for a guest virtual address
70 */
71static void FNAME(walk_addr)(struct guest_walker *walker,
72 struct kvm_vcpu *vcpu, gva_t addr)
61{ 73{
62 hpa_t hpa; 74 hpa_t hpa;
63 struct kvm_memory_slot *slot; 75 struct kvm_memory_slot *slot;
76 pt_element_t *ptep;
77 pt_element_t root;
78 gfn_t table_gfn;
64 79
80 pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
65 walker->level = vcpu->mmu.root_level; 81 walker->level = vcpu->mmu.root_level;
66 slot = gfn_to_memslot(vcpu->kvm, 82 walker->table = NULL;
67 (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 83 root = vcpu->cr3;
68 hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK); 84#if PTTYPE == 64
85 if (!is_long_mode(vcpu)) {
86 walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
87 root = *walker->ptep;
88 if (!(root & PT_PRESENT_MASK))
89 return;
90 --walker->level;
91 }
92#endif
93 table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
94 walker->table_gfn[walker->level - 1] = table_gfn;
95 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
96 walker->level - 1, table_gfn);
97 slot = gfn_to_memslot(vcpu->kvm, table_gfn);
98 hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
69 walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0); 99 walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
70 100
71 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || 101 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
72 (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0); 102 (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
73 103
74 walker->table = (pt_element_t *)( (unsigned long)walker->table |
75 (unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
76 walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK; 104 walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
105
106 for (;;) {
107 int index = PT_INDEX(addr, walker->level);
108 hpa_t paddr;
109
110 ptep = &walker->table[index];
111 ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
112 ((unsigned long)ptep & PAGE_MASK));
113
114 if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
115 *ptep |= PT_ACCESSED_MASK;
116
117 if (!is_present_pte(*ptep))
118 break;
119
120 if (walker->level == PT_PAGE_TABLE_LEVEL) {
121 walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
122 >> PAGE_SHIFT;
123 break;
124 }
125
126 if (walker->level == PT_DIRECTORY_LEVEL
127 && (*ptep & PT_PAGE_SIZE_MASK)
128 && (PTTYPE == 64 || is_pse(vcpu))) {
129 walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
130 >> PAGE_SHIFT;
131 walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
132 break;
133 }
134
135 if (walker->level != 3 || is_long_mode(vcpu))
136 walker->inherited_ar &= walker->table[index];
137 table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
138 paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
139 kunmap_atomic(walker->table, KM_USER0);
140 walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
141 KM_USER0);
142 --walker->level;
 143 walker->table_gfn[walker->level - 1] = table_gfn;
144 pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
145 walker->level - 1, table_gfn);
146 }
147 walker->ptep = ptep;
148 pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
77} 149}
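
An illustration of the per-level index extraction the loop above performs for a 4-level (PTTYPE == 64) guest, assuming the standard 9-bits-per-level x86 split (not spelled out in the patch):

/*
 * level 4: index = (addr >> 39) & 511
 * level 3: index = (addr >> 30) & 511
 * level 2: index = (addr >> 21) & 511  (PT_PAGE_SIZE_MASK here means
 *                                       a large mapping)
 * level 1: index = (addr >> 12) & 511
 *
 * walker->table_gfn[level - 1] records the guest frame of the table
 * visited at each level, so fetch() can tag each shadow page with
 * the guest table it mirrors.
 */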
78 150
79static void FNAME(release_walker)(struct guest_walker *walker) 151static void FNAME(release_walker)(struct guest_walker *walker)
80{ 152{
81 kunmap_atomic(walker->table, KM_USER0); 153 if (walker->table)
154 kunmap_atomic(walker->table, KM_USER0);
82} 155}
83 156
84static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, 157static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
85 u64 *shadow_pte, u64 access_bits) 158 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
86{ 159{
87 ASSERT(*shadow_pte == 0); 160 ASSERT(*shadow_pte == 0);
88 access_bits &= guest_pte; 161 access_bits &= guest_pte;
89 *shadow_pte = (guest_pte & PT_PTE_COPY_MASK); 162 *shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
90 set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK, 163 set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
91 guest_pte & PT_DIRTY_MASK, access_bits); 164 guest_pte & PT_DIRTY_MASK, access_bits, gfn);
92} 165}
93 166
94static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde, 167static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
95 u64 *shadow_pte, u64 access_bits, 168 u64 *shadow_pte, u64 access_bits, gfn_t gfn)
96 int index)
97{ 169{
98 gpa_t gaddr; 170 gpa_t gaddr;
99 171
100 ASSERT(*shadow_pte == 0); 172 ASSERT(*shadow_pte == 0);
101 access_bits &= guest_pde; 173 access_bits &= guest_pde;
102 gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index; 174 gaddr = (gpa_t)gfn << PAGE_SHIFT;
103 if (PTTYPE == 32 && is_cpuid_PSE36()) 175 if (PTTYPE == 32 && is_cpuid_PSE36())
104 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << 176 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
105 (32 - PT32_DIR_PSE36_SHIFT); 177 (32 - PT32_DIR_PSE36_SHIFT);
106 *shadow_pte = guest_pde & PT_PTE_COPY_MASK; 178 *shadow_pte = guest_pde & PT_PTE_COPY_MASK;
107 set_pte_common(vcpu, shadow_pte, gaddr, 179 set_pte_common(vcpu, shadow_pte, gaddr,
108 guest_pde & PT_DIRTY_MASK, access_bits); 180 guest_pde & PT_DIRTY_MASK, access_bits, gfn);
109}
110
111/*
112 * Fetch a guest pte from a specific level in the paging hierarchy.
113 */
114static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu,
115 struct guest_walker *walker,
116 int level,
117 gva_t addr)
118{
119
120 ASSERT(level > 0 && level <= walker->level);
121
122 for (;;) {
123 int index = PT_INDEX(addr, walker->level);
124 hpa_t paddr;
125
126 ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
127 ((unsigned long)&walker->table[index] & PAGE_MASK));
128 if (level == walker->level ||
129 !is_present_pte(walker->table[index]) ||
130 (walker->level == PT_DIRECTORY_LEVEL &&
131 (walker->table[index] & PT_PAGE_SIZE_MASK) &&
132 (PTTYPE == 64 || is_pse(vcpu))))
133 return &walker->table[index];
134 if (walker->level != 3 || is_long_mode(vcpu))
135 walker->inherited_ar &= walker->table[index];
136 paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
137 kunmap_atomic(walker->table, KM_USER0);
138 walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
139 KM_USER0);
140 --walker->level;
141 }
142} 181}
143 182
144/* 183/*
@@ -150,15 +189,26 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
150 hpa_t shadow_addr; 189 hpa_t shadow_addr;
151 int level; 190 int level;
152 u64 *prev_shadow_ent = NULL; 191 u64 *prev_shadow_ent = NULL;
192 pt_element_t *guest_ent = walker->ptep;
193
194 if (!is_present_pte(*guest_ent))
195 return NULL;
153 196
154 shadow_addr = vcpu->mmu.root_hpa; 197 shadow_addr = vcpu->mmu.root_hpa;
155 level = vcpu->mmu.shadow_root_level; 198 level = vcpu->mmu.shadow_root_level;
199 if (level == PT32E_ROOT_LEVEL) {
200 shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
201 shadow_addr &= PT64_BASE_ADDR_MASK;
202 --level;
203 }
156 204
157 for (; ; level--) { 205 for (; ; level--) {
158 u32 index = SHADOW_PT_INDEX(addr, level); 206 u32 index = SHADOW_PT_INDEX(addr, level);
159 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index; 207 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
160 pt_element_t *guest_ent; 208 struct kvm_mmu_page *shadow_page;
161 u64 shadow_pte; 209 u64 shadow_pte;
210 int metaphysical;
211 gfn_t table_gfn;
162 212
163 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { 213 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
164 if (level == PT_PAGE_TABLE_LEVEL) 214 if (level == PT_PAGE_TABLE_LEVEL)
@@ -168,21 +218,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
168 continue; 218 continue;
169 } 219 }
170 220
171 if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
172 ASSERT(level == PT32E_ROOT_LEVEL);
173 guest_ent = FNAME(fetch_guest)(vcpu, walker,
174 PT32_ROOT_LEVEL, addr);
175 } else
176 guest_ent = FNAME(fetch_guest)(vcpu, walker,
177 level, addr);
178
179 if (!is_present_pte(*guest_ent))
180 return NULL;
181
182 /* Don't set accessed bit on PAE PDPTRs */
183 if (vcpu->mmu.root_level != 3 || walker->level != 3)
184 *guest_ent |= PT_ACCESSED_MASK;
185
186 if (level == PT_PAGE_TABLE_LEVEL) { 221 if (level == PT_PAGE_TABLE_LEVEL) {
187 222
188 if (walker->level == PT_DIRECTORY_LEVEL) { 223 if (walker->level == PT_DIRECTORY_LEVEL) {
@@ -190,21 +225,30 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
190 *prev_shadow_ent |= PT_SHADOW_PS_MARK; 225 *prev_shadow_ent |= PT_SHADOW_PS_MARK;
191 FNAME(set_pde)(vcpu, *guest_ent, shadow_ent, 226 FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
192 walker->inherited_ar, 227 walker->inherited_ar,
193 PT_INDEX(addr, PT_PAGE_TABLE_LEVEL)); 228 walker->gfn);
194 } else { 229 } else {
195 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL); 230 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
196 FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar); 231 FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
232 walker->inherited_ar,
233 walker->gfn);
197 } 234 }
198 return shadow_ent; 235 return shadow_ent;
199 } 236 }
200 237
201 shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent); 238 if (level - 1 == PT_PAGE_TABLE_LEVEL
202 if (!VALID_PAGE(shadow_addr)) 239 && walker->level == PT_DIRECTORY_LEVEL) {
203 return ERR_PTR(-ENOMEM); 240 metaphysical = 1;
204 shadow_pte = shadow_addr | PT_PRESENT_MASK; 241 table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
205 if (vcpu->mmu.root_level > 3 || level != 3) 242 >> PAGE_SHIFT;
206 shadow_pte |= PT_ACCESSED_MASK 243 } else {
207 | PT_WRITABLE_MASK | PT_USER_MASK; 244 metaphysical = 0;
245 table_gfn = walker->table_gfn[level - 2];
246 }
247 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
248 metaphysical, shadow_ent);
249 shadow_addr = shadow_page->page_hpa;
250 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
251 | PT_WRITABLE_MASK | PT_USER_MASK;
208 *shadow_ent = shadow_pte; 252 *shadow_ent = shadow_pte;
209 prev_shadow_ent = shadow_ent; 253 prev_shadow_ent = shadow_ent;
210 } 254 }
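
A sketch of the "metaphysical" distinction made above (restated for clarity; the terminology is the patch's own):

/*
 * metaphysical = 0: the shadow page mirrors a real guest page
 *                   table, identified by walker->table_gfn[]
 * metaphysical = 1: the guest used a large (PSE) mapping, so this
 *                   page-table-level shadow page has no guest
 *                   counterpart; table_gfn is the guest frame the
 *                   large mapping covers
 */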
@@ -221,11 +265,13 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
221 u64 *shadow_ent, 265 u64 *shadow_ent,
222 struct guest_walker *walker, 266 struct guest_walker *walker,
223 gva_t addr, 267 gva_t addr,
224 int user) 268 int user,
269 int *write_pt)
225{ 270{
226 pt_element_t *guest_ent; 271 pt_element_t *guest_ent;
227 int writable_shadow; 272 int writable_shadow;
228 gfn_t gfn; 273 gfn_t gfn;
274 struct kvm_mmu_page *page;
229 275
230 if (is_writeble_pte(*shadow_ent)) 276 if (is_writeble_pte(*shadow_ent))
231 return 0; 277 return 0;
@@ -250,17 +296,35 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
250 *shadow_ent &= ~PT_USER_MASK; 296 *shadow_ent &= ~PT_USER_MASK;
251 } 297 }
252 298
253 guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr); 299 guest_ent = walker->ptep;
254 300
255 if (!is_present_pte(*guest_ent)) { 301 if (!is_present_pte(*guest_ent)) {
256 *shadow_ent = 0; 302 *shadow_ent = 0;
257 return 0; 303 return 0;
258 } 304 }
259 305
260 gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 306 gfn = walker->gfn;
307
308 if (user) {
309 /*
310 * Usermode page faults won't be for page table updates.
311 */
312 while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
313 pgprintk("%s: zap %lx %x\n",
314 __FUNCTION__, gfn, page->role.word);
315 kvm_mmu_zap_page(vcpu, page);
316 }
317 } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
318 pgprintk("%s: found shadow page for %lx, marking ro\n",
319 __FUNCTION__, gfn);
320 *guest_ent |= PT_DIRTY_MASK;
321 *write_pt = 1;
322 return 0;
323 }
261 mark_page_dirty(vcpu->kvm, gfn); 324 mark_page_dirty(vcpu->kvm, gfn);
262 *shadow_ent |= PT_WRITABLE_MASK; 325 *shadow_ent |= PT_WRITABLE_MASK;
263 *guest_ent |= PT_DIRTY_MASK; 326 *guest_ent |= PT_DIRTY_MASK;
327 rmap_add(vcpu, shadow_ent);
264 328
265 return 1; 329 return 1;
266} 330}
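
A summary sketch of the three outcomes the shadowed-gfn logic above can take (the cases are from the hunk; the table form is not):

/*
 * user-mode fault, gfn shadowed    -> zap the shadow page(s); user
 *                                     code never writes its own page
 *                                     tables, so the frame is being
 *                                     reused as plain data
 * kernel-mode fault, gfn shadowed  -> leave the spte read-only, set
 *                                     *write_pt, and have the caller
 *                                     emulate the writing instruction
 * gfn not shadowed                 -> grant write access and add the
 *                                     spte to the rmap
 */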
@@ -276,7 +340,8 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
276 * - normal guest page fault due to the guest pte marked not present, not 340 * - normal guest page fault due to the guest pte marked not present, not
277 * writable, or not executable 341 * writable, or not executable
278 * 342 *
279 * Returns: 1 if we need to emulate the instruction, 0 otherwise 343 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
344 * a negative value on error.
280 */ 345 */
281static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, 346static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
282 u32 error_code) 347 u32 error_code)
@@ -287,39 +352,47 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
287 struct guest_walker walker; 352 struct guest_walker walker;
288 u64 *shadow_pte; 353 u64 *shadow_pte;
289 int fixed; 354 int fixed;
355 int write_pt = 0;
356 int r;
357
358 pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
359 kvm_mmu_audit(vcpu, "pre page fault");
360
361 r = mmu_topup_memory_caches(vcpu);
362 if (r)
363 return r;
290 364
291 /* 365 /*
292 * Look up the shadow pte for the faulting address. 366 * Look up the shadow pte for the faulting address.
293 */ 367 */
294 for (;;) { 368 FNAME(walk_addr)(&walker, vcpu, addr);
295 FNAME(init_walker)(&walker, vcpu); 369 shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
296 shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
297 if (IS_ERR(shadow_pte)) { /* must be -ENOMEM */
298 nonpaging_flush(vcpu);
299 FNAME(release_walker)(&walker);
300 continue;
301 }
302 break;
303 }
304 370
305 /* 371 /*
306 * The page is not mapped by the guest. Let the guest handle it. 372 * The page is not mapped by the guest. Let the guest handle it.
307 */ 373 */
308 if (!shadow_pte) { 374 if (!shadow_pte) {
375 pgprintk("%s: not mapped\n", __FUNCTION__);
309 inject_page_fault(vcpu, addr, error_code); 376 inject_page_fault(vcpu, addr, error_code);
310 FNAME(release_walker)(&walker); 377 FNAME(release_walker)(&walker);
311 return 0; 378 return 0;
312 } 379 }
313 380
381 pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__,
382 shadow_pte, *shadow_pte);
383
314 /* 384 /*
315 * Update the shadow pte. 385 * Update the shadow pte.
316 */ 386 */
317 if (write_fault) 387 if (write_fault)
318 fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr, 388 fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
319 user_fault); 389 user_fault, &write_pt);
320 else 390 else
321 fixed = fix_read_pf(shadow_pte); 391 fixed = fix_read_pf(shadow_pte);
322 392
393 pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
394 shadow_pte, *shadow_pte);
395
323 FNAME(release_walker)(&walker); 396 FNAME(release_walker)(&walker);
324 397
325 /* 398 /*
@@ -331,20 +404,23 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
331 pgprintk("%s: io work, no access\n", __FUNCTION__); 404 pgprintk("%s: io work, no access\n", __FUNCTION__);
332 inject_page_fault(vcpu, addr, 405 inject_page_fault(vcpu, addr,
333 error_code | PFERR_PRESENT_MASK); 406 error_code | PFERR_PRESENT_MASK);
407 kvm_mmu_audit(vcpu, "post page fault (io)");
334 return 0; 408 return 0;
335 } 409 }
336 410
337 /* 411 /*
338 * pte not present, guest page fault. 412 * pte not present, guest page fault.
339 */ 413 */
340 if (pte_present && !fixed) { 414 if (pte_present && !fixed && !write_pt) {
341 inject_page_fault(vcpu, addr, error_code); 415 inject_page_fault(vcpu, addr, error_code);
416 kvm_mmu_audit(vcpu, "post page fault (guest)");
342 return 0; 417 return 0;
343 } 418 }
344 419
345 ++kvm_stat.pf_fixed; 420 ++kvm_stat.pf_fixed;
421 kvm_mmu_audit(vcpu, "post page fault (fixed)");
346 422
347 return 0; 423 return write_pt;
348} 424}
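
The return-value contract of FNAME(page_fault) after this change, restated from the comment and code above:

/*
 * < 0  error (e.g. memory-cache refill failed); propagate to caller
 * 0    fault resolved or injected into the guest; resume the guest
 * 1    write_pt: the fault hit a shadowed page table; the caller
 *      must emulate the writing instruction
 */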
349 425
350static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) 426static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -353,9 +429,8 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
353 pt_element_t guest_pte; 429 pt_element_t guest_pte;
354 gpa_t gpa; 430 gpa_t gpa;
355 431
356 FNAME(init_walker)(&walker, vcpu); 432 FNAME(walk_addr)(&walker, vcpu, vaddr);
357 guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL, 433 guest_pte = *walker.ptep;
358 vaddr);
359 FNAME(release_walker)(&walker); 434 FNAME(release_walker)(&walker);
360 435
361 if (!is_present_pte(guest_pte)) 436 if (!is_present_pte(guest_pte))
@@ -389,3 +464,4 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
389#undef PT_PTE_COPY_MASK 464#undef PT_PTE_COPY_MASK
390#undef PT_NON_PTE_COPY_MASK 465#undef PT_NON_PTE_COPY_MASK
391#undef PT_DIR_BASE_ADDR_MASK 466#undef PT_DIR_BASE_ADDR_MASK
467#undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa0428735717..714f6a7841cd 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/highmem.h> 19#include <linux/highmem.h>
20#include <linux/profile.h>
20#include <asm/desc.h> 21#include <asm/desc.h>
21 22
22#include "kvm_svm.h" 23#include "kvm_svm.h"
@@ -235,6 +236,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
235 236
236 vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip; 237 vcpu->rip = vcpu->svm->vmcb->save.rip = vcpu->svm->next_rip;
237 vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; 238 vcpu->svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
239
240 vcpu->interrupt_window_open = 1;
238} 241}
239 242
240static int has_svm(void) 243static int has_svm(void)
@@ -495,7 +498,6 @@ static void init_vmcb(struct vmcb *vmcb)
495 /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */ 498 /* (1ULL << INTERCEPT_SELECTIVE_CR0) | */
496 (1ULL << INTERCEPT_CPUID) | 499 (1ULL << INTERCEPT_CPUID) |
497 (1ULL << INTERCEPT_HLT) | 500 (1ULL << INTERCEPT_HLT) |
498 (1ULL << INTERCEPT_INVLPG) |
499 (1ULL << INTERCEPT_INVLPGA) | 501 (1ULL << INTERCEPT_INVLPGA) |
500 (1ULL << INTERCEPT_IOIO_PROT) | 502 (1ULL << INTERCEPT_IOIO_PROT) |
501 (1ULL << INTERCEPT_MSR_PROT) | 503 (1ULL << INTERCEPT_MSR_PROT) |
@@ -700,6 +702,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
700 vcpu->svm->vmcb->save.gdtr.base = dt->base ; 702 vcpu->svm->vmcb->save.gdtr.base = dt->base ;
701} 703}
702 704
705static void svm_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
706{
707}
708
703static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 709static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
704{ 710{
705#ifdef CONFIG_X86_64 711#ifdef CONFIG_X86_64
@@ -847,6 +853,7 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
847 u64 fault_address; 853 u64 fault_address;
848 u32 error_code; 854 u32 error_code;
849 enum emulation_result er; 855 enum emulation_result er;
856 int r;
850 857
851 if (is_external_interrupt(exit_int_info)) 858 if (is_external_interrupt(exit_int_info))
852 push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK); 859 push_irq(vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
@@ -855,7 +862,12 @@ static int pf_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
855 862
856 fault_address = vcpu->svm->vmcb->control.exit_info_2; 863 fault_address = vcpu->svm->vmcb->control.exit_info_2;
857 error_code = vcpu->svm->vmcb->control.exit_info_1; 864 error_code = vcpu->svm->vmcb->control.exit_info_1;
858 if (!vcpu->mmu.page_fault(vcpu, fault_address, error_code)) { 865 r = kvm_mmu_page_fault(vcpu, fault_address, error_code);
866 if (r < 0) {
867 spin_unlock(&vcpu->kvm->lock);
868 return r;
869 }
870 if (!r) {
859 spin_unlock(&vcpu->kvm->lock); 871 spin_unlock(&vcpu->kvm->lock);
860 return 1; 872 return 1;
861 } 873 }
@@ -1031,10 +1043,11 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1031{ 1043{
1032 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; 1044 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
1033 skip_emulated_instruction(vcpu); 1045 skip_emulated_instruction(vcpu);
1034 if (vcpu->irq_summary && (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF)) 1046 if (vcpu->irq_summary)
1035 return 1; 1047 return 1;
1036 1048
1037 kvm_run->exit_reason = KVM_EXIT_HLT; 1049 kvm_run->exit_reason = KVM_EXIT_HLT;
1050 ++kvm_stat.halt_exits;
1038 return 0; 1051 return 0;
1039} 1052}
1040 1053
@@ -1186,6 +1199,23 @@ static int msr_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1186 return rdmsr_interception(vcpu, kvm_run); 1199 return rdmsr_interception(vcpu, kvm_run);
1187} 1200}
1188 1201
1202static int interrupt_window_interception(struct kvm_vcpu *vcpu,
1203 struct kvm_run *kvm_run)
1204{
1205 /*
 1206 * If user space is waiting to inject interrupts, exit as soon
 1207 * as possible.
1208 */
1209 if (kvm_run->request_interrupt_window &&
1210 !vcpu->irq_summary) {
1211 ++kvm_stat.irq_window_exits;
1212 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1213 return 0;
1214 }
1215
1216 return 1;
1217}
1218
1189static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu, 1219static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1190 struct kvm_run *kvm_run) = { 1220 struct kvm_run *kvm_run) = {
1191 [SVM_EXIT_READ_CR0] = emulate_on_interception, 1221 [SVM_EXIT_READ_CR0] = emulate_on_interception,
@@ -1210,6 +1240,7 @@ static int (*svm_exit_handlers[])(struct kvm_vcpu *vcpu,
1210 [SVM_EXIT_NMI] = nop_on_interception, 1240 [SVM_EXIT_NMI] = nop_on_interception,
1211 [SVM_EXIT_SMI] = nop_on_interception, 1241 [SVM_EXIT_SMI] = nop_on_interception,
1212 [SVM_EXIT_INIT] = nop_on_interception, 1242 [SVM_EXIT_INIT] = nop_on_interception,
1243 [SVM_EXIT_VINTR] = interrupt_window_interception,
1213 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */ 1244 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
1214 [SVM_EXIT_CPUID] = cpuid_interception, 1245 [SVM_EXIT_CPUID] = cpuid_interception,
1215 [SVM_EXIT_HLT] = halt_interception, 1246 [SVM_EXIT_HLT] = halt_interception,
@@ -1278,15 +1309,11 @@ static void pre_svm_run(struct kvm_vcpu *vcpu)
1278} 1309}
1279 1310
1280 1311
1281static inline void kvm_try_inject_irq(struct kvm_vcpu *vcpu) 1312static inline void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1282{ 1313{
1283 struct vmcb_control_area *control; 1314 struct vmcb_control_area *control;
1284 1315
1285 if (!vcpu->irq_summary)
1286 return;
1287
1288 control = &vcpu->svm->vmcb->control; 1316 control = &vcpu->svm->vmcb->control;
1289
1290 control->int_vector = pop_irq(vcpu); 1317 control->int_vector = pop_irq(vcpu);
1291 control->int_ctl &= ~V_INTR_PRIO_MASK; 1318 control->int_ctl &= ~V_INTR_PRIO_MASK;
1292 control->int_ctl |= V_IRQ_MASK | 1319 control->int_ctl |= V_IRQ_MASK |
@@ -1301,6 +1328,59 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
1301 control->int_ctl &= ~V_IRQ_MASK; 1328 control->int_ctl &= ~V_IRQ_MASK;
1302 push_irq(vcpu, control->int_vector); 1329 push_irq(vcpu, control->int_vector);
1303 } 1330 }
1331
1332 vcpu->interrupt_window_open =
1333 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1334}
1335
1336static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1337 struct kvm_run *kvm_run)
1338{
1339 struct vmcb_control_area *control = &vcpu->svm->vmcb->control;
1340
1341 vcpu->interrupt_window_open =
1342 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
1343 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1344
1345 if (vcpu->interrupt_window_open && vcpu->irq_summary)
1346 /*
1347 * If interrupts enabled, and not blocked by sti or mov ss. Good.
1348 */
1349 kvm_do_inject_irq(vcpu);
1350
1351 /*
1352 * Interrupts blocked. Wait for unblock.
1353 */
1354 if (!vcpu->interrupt_window_open &&
1355 (vcpu->irq_summary || kvm_run->request_interrupt_window)) {
1356 control->intercept |= 1ULL << INTERCEPT_VINTR;
1357 } else
1358 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1359}
1360
1361static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1362 struct kvm_run *kvm_run)
1363{
1364 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1365 vcpu->irq_summary == 0);
1366 kvm_run->if_flag = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF) != 0;
1367 kvm_run->cr8 = vcpu->cr8;
1368 kvm_run->apic_base = vcpu->apic_base;
1369}
1370
1371/*
1372 * Check if userspace requested an interrupt window, and that the
1373 * interrupt window is open.
1374 *
1375 * No need to exit to userspace if we already have an interrupt queued.
1376 */
1377static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1378 struct kvm_run *kvm_run)
1379{
1380 return (!vcpu->irq_summary &&
1381 kvm_run->request_interrupt_window &&
1382 vcpu->interrupt_window_open &&
1383 (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_IF));
1304} 1384}
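
A hypothetical userspace loop driving the interrupt-window protocol implemented above; the request/ready fields are from this patch, the surrounding ioctl usage is an assumption, not part of the diff:

/*
 *	for (;;) {
 *		run->request_interrupt_window = irq_pending();
 *		ioctl(vcpu_fd, KVM_RUN, run);
 *		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN ||
 *		    (irq_pending() &&
 *		     run->ready_for_interrupt_injection))
 *			queue_irq_for_next_entry();
 *	}
 */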
1305 1385
1306static void save_db_regs(unsigned long *db_regs) 1386static void save_db_regs(unsigned long *db_regs)
@@ -1324,9 +1404,10 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1324 u16 fs_selector; 1404 u16 fs_selector;
1325 u16 gs_selector; 1405 u16 gs_selector;
1326 u16 ldt_selector; 1406 u16 ldt_selector;
1407 int r;
1327 1408
1328again: 1409again:
1329 kvm_try_inject_irq(vcpu); 1410 do_interrupt_requests(vcpu, kvm_run);
1330 1411
1331 clgi(); 1412 clgi();
1332 1413
@@ -1478,6 +1559,13 @@ again:
1478 1559
1479 reload_tss(vcpu); 1560 reload_tss(vcpu);
1480 1561
1562 /*
1563 * Profile KVM exit RIPs:
1564 */
1565 if (unlikely(prof_on == KVM_PROFILING))
1566 profile_hit(KVM_PROFILING,
1567 (void *)(unsigned long)vcpu->svm->vmcb->save.rip);
1568
1481 stgi(); 1569 stgi();
1482 1570
1483 kvm_reput_irq(vcpu); 1571 kvm_reput_irq(vcpu);
@@ -1487,18 +1575,28 @@ again:
1487 if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) { 1575 if (vcpu->svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1488 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; 1576 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
1489 kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code; 1577 kvm_run->exit_reason = vcpu->svm->vmcb->control.exit_code;
1578 post_kvm_run_save(vcpu, kvm_run);
1490 return 0; 1579 return 0;
1491 } 1580 }
1492 1581
1493 if (handle_exit(vcpu, kvm_run)) { 1582 r = handle_exit(vcpu, kvm_run);
1583 if (r > 0) {
1494 if (signal_pending(current)) { 1584 if (signal_pending(current)) {
1495 ++kvm_stat.signal_exits; 1585 ++kvm_stat.signal_exits;
1586 post_kvm_run_save(vcpu, kvm_run);
1587 return -EINTR;
1588 }
1589
1590 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1591 ++kvm_stat.request_irq_exits;
1592 post_kvm_run_save(vcpu, kvm_run);
1496 return -EINTR; 1593 return -EINTR;
1497 } 1594 }
1498 kvm_resched(vcpu); 1595 kvm_resched(vcpu);
1499 goto again; 1596 goto again;
1500 } 1597 }
1501 return 0; 1598 post_kvm_run_save(vcpu, kvm_run);
1599 return r;
1502} 1600}
1503 1601
1504static void svm_flush_tlb(struct kvm_vcpu *vcpu) 1602static void svm_flush_tlb(struct kvm_vcpu *vcpu)
@@ -1565,6 +1663,7 @@ static struct kvm_arch_ops svm_arch_ops = {
1565 .get_segment = svm_get_segment, 1663 .get_segment = svm_get_segment,
1566 .set_segment = svm_set_segment, 1664 .set_segment = svm_set_segment,
1567 .get_cs_db_l_bits = svm_get_cs_db_l_bits, 1665 .get_cs_db_l_bits = svm_get_cs_db_l_bits,
1666 .decache_cr0_cr4_guest_bits = svm_decache_cr0_cr4_guest_bits,
1568 .set_cr0 = svm_set_cr0, 1667 .set_cr0 = svm_set_cr0,
1569 .set_cr0_no_modeswitch = svm_set_cr0, 1668 .set_cr0_no_modeswitch = svm_set_cr0,
1570 .set_cr3 = svm_set_cr3, 1669 .set_cr3 = svm_set_cr3,
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index d0a2c2d5342a..ce219e3f557f 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -21,6 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/profile.h>
24#include <asm/io.h> 25#include <asm/io.h>
25#include <asm/desc.h> 26#include <asm/desc.h>
26 27
@@ -116,7 +117,7 @@ static void vmcs_clear(struct vmcs *vmcs)
116static void __vcpu_clear(void *arg) 117static void __vcpu_clear(void *arg)
117{ 118{
118 struct kvm_vcpu *vcpu = arg; 119 struct kvm_vcpu *vcpu = arg;
119 int cpu = smp_processor_id(); 120 int cpu = raw_smp_processor_id();
120 121
121 if (vcpu->cpu == cpu) 122 if (vcpu->cpu == cpu)
122 vmcs_clear(vcpu->vmcs); 123 vmcs_clear(vcpu->vmcs);
@@ -152,15 +153,21 @@ static u64 vmcs_read64(unsigned long field)
152#endif 153#endif
153} 154}
154 155
156static noinline void vmwrite_error(unsigned long field, unsigned long value)
157{
158 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
159 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
160 dump_stack();
161}
162
155static void vmcs_writel(unsigned long field, unsigned long value) 163static void vmcs_writel(unsigned long field, unsigned long value)
156{ 164{
157 u8 error; 165 u8 error;
158 166
159 asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0" 167 asm volatile (ASM_VMX_VMWRITE_RAX_RDX "; setna %0"
160 : "=q"(error) : "a"(value), "d"(field) : "cc" ); 168 : "=q"(error) : "a"(value), "d"(field) : "cc" );
161 if (error) 169 if (unlikely(error))
162 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", 170 vmwrite_error(field, value);
163 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
164} 171}
165 172
166static void vmcs_write16(unsigned long field, u16 value) 173static void vmcs_write16(unsigned long field, u16 value)
@@ -263,6 +270,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
263 if (interruptibility & 3) 270 if (interruptibility & 3)
264 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 271 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
265 interruptibility & ~3); 272 interruptibility & ~3);
273 vcpu->interrupt_window_open = 1;
266} 274}
267 275
268static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code) 276static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
@@ -541,7 +549,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
541 549
542static struct vmcs *alloc_vmcs(void) 550static struct vmcs *alloc_vmcs(void)
543{ 551{
544 return alloc_vmcs_cpu(smp_processor_id()); 552 return alloc_vmcs_cpu(raw_smp_processor_id());
545} 553}
546 554
547static void free_vmcs(struct vmcs *vmcs) 555static void free_vmcs(struct vmcs *vmcs)
@@ -736,6 +744,15 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
736 744
737#endif 745#endif
738 746
747static void vmx_decache_cr0_cr4_guest_bits(struct kvm_vcpu *vcpu)
748{
749 vcpu->cr0 &= KVM_GUEST_CR0_MASK;
750 vcpu->cr0 |= vmcs_readl(GUEST_CR0) & ~KVM_GUEST_CR0_MASK;
751
752 vcpu->cr4 &= KVM_GUEST_CR4_MASK;
753 vcpu->cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
754}
755
739static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 756static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
740{ 757{
741 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) 758 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
@@ -1011,8 +1028,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1011 vmcs_writel(GUEST_RIP, 0xfff0); 1028 vmcs_writel(GUEST_RIP, 0xfff0);
1012 vmcs_writel(GUEST_RSP, 0); 1029 vmcs_writel(GUEST_RSP, 0);
1013 1030
1014 vmcs_writel(GUEST_CR3, 0);
1015
1016 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 1031 //todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0
1017 vmcs_writel(GUEST_DR7, 0x400); 1032 vmcs_writel(GUEST_DR7, 0x400);
1018 1033
@@ -1049,7 +1064,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1049 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ 1064 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
1050 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ 1065 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
1051 | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */ 1066 | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */
1052 | CPU_BASED_INVDPG_EXITING
1053 | CPU_BASED_MOV_DR_EXITING 1067 | CPU_BASED_MOV_DR_EXITING
1054 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */ 1068 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1055 ); 1069 );
@@ -1094,14 +1108,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1094 rdmsrl(MSR_IA32_SYSENTER_EIP, a); 1108 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
1095 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ 1109 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
1096 1110
1097 ret = -ENOMEM;
1098 vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1099 if (!vcpu->guest_msrs)
1100 goto out;
1101 vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1102 if (!vcpu->host_msrs)
1103 goto out_free_guest_msrs;
1104
1105 for (i = 0; i < NR_VMX_MSR; ++i) { 1111 for (i = 0; i < NR_VMX_MSR; ++i) {
1106 u32 index = vmx_msr_index[i]; 1112 u32 index = vmx_msr_index[i];
1107 u32 data_low, data_high; 1113 u32 data_low, data_high;
@@ -1155,8 +1161,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1155 1161
1156 return 0; 1162 return 0;
1157 1163
1158out_free_guest_msrs:
1159 kfree(vcpu->guest_msrs);
1160out: 1164out:
1161 return ret; 1165 return ret;
1162} 1166}
@@ -1224,21 +1228,34 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu)
1224 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 1228 irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1225} 1229}
1226 1230
1227static void kvm_try_inject_irq(struct kvm_vcpu *vcpu) 1231
1232static void do_interrupt_requests(struct kvm_vcpu *vcpu,
1233 struct kvm_run *kvm_run)
1228{ 1234{
1229 if ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) 1235 u32 cpu_based_vm_exec_control;
1230 && (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0) 1236
1237 vcpu->interrupt_window_open =
1238 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
1239 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0);
1240
1241 if (vcpu->interrupt_window_open &&
1242 vcpu->irq_summary &&
1243 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1231 /* 1244 /*
 1232 * Interrupts enabled, and not blocked by sti or mov ss. Good. 1245 * Interrupts enabled, and not blocked by sti or mov ss. Good.
1233 */ 1246 */
1234 kvm_do_inject_irq(vcpu); 1247 kvm_do_inject_irq(vcpu);
1235 else 1248
1249 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
1250 if (!vcpu->interrupt_window_open &&
1251 (vcpu->irq_summary || kvm_run->request_interrupt_window))
1236 /* 1252 /*
1237 * Interrupts blocked. Wait for unblock. 1253 * Interrupts blocked. Wait for unblock.
1238 */ 1254 */
1239 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1255 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
1240 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) 1256 else
1241 | CPU_BASED_VIRTUAL_INTR_PENDING); 1257 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
1258 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
1242} 1259}
1243 1260
1244static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu) 1261static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
@@ -1277,6 +1294,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1277 unsigned long cr2, rip; 1294 unsigned long cr2, rip;
1278 u32 vect_info; 1295 u32 vect_info;
1279 enum emulation_result er; 1296 enum emulation_result er;
1297 int r;
1280 1298
1281 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 1299 vect_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
1282 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 1300 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
@@ -1305,7 +1323,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1305 cr2 = vmcs_readl(EXIT_QUALIFICATION); 1323 cr2 = vmcs_readl(EXIT_QUALIFICATION);
1306 1324
1307 spin_lock(&vcpu->kvm->lock); 1325 spin_lock(&vcpu->kvm->lock);
1308 if (!vcpu->mmu.page_fault(vcpu, cr2, error_code)) { 1326 r = kvm_mmu_page_fault(vcpu, cr2, error_code);
1327 if (r < 0) {
1328 spin_unlock(&vcpu->kvm->lock);
1329 return r;
1330 }
1331 if (!r) {
1309 spin_unlock(&vcpu->kvm->lock); 1332 spin_unlock(&vcpu->kvm->lock);
1310 return 1; 1333 return 1;
1311 } 1334 }
@@ -1425,17 +1448,6 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1425 return 0; 1448 return 0;
1426} 1449}
1427 1450
1428static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1429{
1430 u64 address = vmcs_read64(EXIT_QUALIFICATION);
1431 int instruction_length = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1432 spin_lock(&vcpu->kvm->lock);
1433 vcpu->mmu.inval_page(vcpu, address);
1434 spin_unlock(&vcpu->kvm->lock);
1435 vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) + instruction_length);
1436 return 1;
1437}
1438
1439static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1451static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1440{ 1452{
1441 u64 exit_qualification; 1453 u64 exit_qualification;
@@ -1575,23 +1587,40 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1575 return 1; 1587 return 1;
1576} 1588}
1577 1589
1590static void post_kvm_run_save(struct kvm_vcpu *vcpu,
1591 struct kvm_run *kvm_run)
1592{
1593 kvm_run->if_flag = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) != 0;
1594 kvm_run->cr8 = vcpu->cr8;
1595 kvm_run->apic_base = vcpu->apic_base;
1596 kvm_run->ready_for_interrupt_injection = (vcpu->interrupt_window_open &&
1597 vcpu->irq_summary == 0);
1598}
1599
1578static int handle_interrupt_window(struct kvm_vcpu *vcpu, 1600static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1579 struct kvm_run *kvm_run) 1601 struct kvm_run *kvm_run)
1580{ 1602{
1581 /* Turn off interrupt window reporting. */ 1603 /*
1582 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1604 * If the user space waits to inject interrupts, exit as soon as
1583 vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) 1605 * possible
1584 & ~CPU_BASED_VIRTUAL_INTR_PENDING); 1606 */
1607 if (kvm_run->request_interrupt_window &&
1608 !vcpu->irq_summary) {
1609 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1610 ++kvm_stat.irq_window_exits;
1611 return 0;
1612 }
1585 return 1; 1613 return 1;
1586} 1614}
1587 1615
1588static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1616static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1589{ 1617{
1590 skip_emulated_instruction(vcpu); 1618 skip_emulated_instruction(vcpu);
1591 if (vcpu->irq_summary && (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)) 1619 if (vcpu->irq_summary)
1592 return 1; 1620 return 1;
1593 1621
1594 kvm_run->exit_reason = KVM_EXIT_HLT; 1622 kvm_run->exit_reason = KVM_EXIT_HLT;
1623 ++kvm_stat.halt_exits;
1595 return 0; 1624 return 0;
1596} 1625}
1597 1626
@@ -1605,7 +1634,6 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1605 [EXIT_REASON_EXCEPTION_NMI] = handle_exception, 1634 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
1606 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, 1635 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
1607 [EXIT_REASON_IO_INSTRUCTION] = handle_io, 1636 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
1608 [EXIT_REASON_INVLPG] = handle_invlpg,
1609 [EXIT_REASON_CR_ACCESS] = handle_cr, 1637 [EXIT_REASON_CR_ACCESS] = handle_cr,
1610 [EXIT_REASON_DR_ACCESS] = handle_dr, 1638 [EXIT_REASON_DR_ACCESS] = handle_dr,
1611 [EXIT_REASON_CPUID] = handle_cpuid, 1639 [EXIT_REASON_CPUID] = handle_cpuid,
@@ -1642,11 +1670,27 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1642 return 0; 1670 return 0;
1643} 1671}
1644 1672
1673/*
1674 * Check if userspace requested an interrupt window, and that the
1675 * interrupt window is open.
1676 *
1677 * No need to exit to userspace if we already have an interrupt queued.
1678 */
1679static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1680 struct kvm_run *kvm_run)
1681{
1682 return (!vcpu->irq_summary &&
1683 kvm_run->request_interrupt_window &&
1684 vcpu->interrupt_window_open &&
1685 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1686}
1687
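
For reference, a minimal userspace sketch of the dm_request_for_irq_injection() predicate added above; the struct fields and names here are hypothetical stand-ins for the kernel-side state, not the real kvm structures:

#include <stdbool.h>

struct vcpu { bool irq_summary; bool interrupt_window_open; bool rflags_if; };
struct run  { bool request_interrupt_window; };

static bool want_irq_window_exit(const struct vcpu *v, const struct run *r)
{
	/* Exit to userspace only when it asked for an interrupt window,
	 * the guest can take an interrupt right now, and no interrupt
	 * is already queued. */
	return !v->irq_summary &&
	       r->request_interrupt_window &&
	       v->interrupt_window_open &&
	       v->rflags_if;
}
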
1645static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1688static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1646{ 1689{
1647 u8 fail; 1690 u8 fail;
1648 u16 fs_sel, gs_sel, ldt_sel; 1691 u16 fs_sel, gs_sel, ldt_sel;
1649 int fs_gs_ldt_reload_needed; 1692 int fs_gs_ldt_reload_needed;
1693 int r;
1650 1694
1651again: 1695again:
1652 /* 1696 /*
@@ -1673,9 +1717,7 @@ again:
1673 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel)); 1717 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1674#endif 1718#endif
1675 1719
1676 if (vcpu->irq_summary && 1720 do_interrupt_requests(vcpu, kvm_run);
1677 !(vmcs_read32(VM_ENTRY_INTR_INFO_FIELD) & INTR_INFO_VALID_MASK))
1678 kvm_try_inject_irq(vcpu);
1679 1721
1680 if (vcpu->guest_debug.enabled) 1722 if (vcpu->guest_debug.enabled)
1681 kvm_guest_debug_pre(vcpu); 1723 kvm_guest_debug_pre(vcpu);
@@ -1812,15 +1854,23 @@ again:
1812 1854
1813 fx_save(vcpu->guest_fx_image); 1855 fx_save(vcpu->guest_fx_image);
1814 fx_restore(vcpu->host_fx_image); 1856 fx_restore(vcpu->host_fx_image);
1857 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
1815 1858
1816#ifndef CONFIG_X86_64 1859#ifndef CONFIG_X86_64
1817 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 1860 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
1818#endif 1861#endif
1819 1862
1863 /*
1864 * Profile KVM exit RIPs:
1865 */
1866 if (unlikely(prof_on == KVM_PROFILING))
1867 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
1868
1820 kvm_run->exit_type = 0; 1869 kvm_run->exit_type = 0;
1821 if (fail) { 1870 if (fail) {
1822 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY; 1871 kvm_run->exit_type = KVM_EXIT_TYPE_FAIL_ENTRY;
1823 kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR); 1872 kvm_run->exit_reason = vmcs_read32(VM_INSTRUCTION_ERROR);
1873 r = 0;
1824 } else { 1874 } else {
1825 if (fs_gs_ldt_reload_needed) { 1875 if (fs_gs_ldt_reload_needed) {
1826 load_ldt(ldt_sel); 1876 load_ldt(ldt_sel);
@@ -1840,17 +1890,28 @@ again:
1840 } 1890 }
1841 vcpu->launched = 1; 1891 vcpu->launched = 1;
1842 kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT; 1892 kvm_run->exit_type = KVM_EXIT_TYPE_VM_EXIT;
1843 if (kvm_handle_exit(kvm_run, vcpu)) { 1893 r = kvm_handle_exit(kvm_run, vcpu);
1894 if (r > 0) {
1844 /* Give scheduler a chance to reschedule. */ 1895 /* Give scheduler a chance to reschedule. */
1845 if (signal_pending(current)) { 1896 if (signal_pending(current)) {
1846 ++kvm_stat.signal_exits; 1897 ++kvm_stat.signal_exits;
1898 post_kvm_run_save(vcpu, kvm_run);
1847 return -EINTR; 1899 return -EINTR;
1848 } 1900 }
1901
1902 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
1903 ++kvm_stat.request_irq_exits;
1904 post_kvm_run_save(vcpu, kvm_run);
1905 return -EINTR;
1906 }
1907
1849 kvm_resched(vcpu); 1908 kvm_resched(vcpu);
1850 goto again; 1909 goto again;
1851 } 1910 }
1852 } 1911 }
1853 return 0; 1912
1913 post_kvm_run_save(vcpu, kvm_run);
1914 return r;
1854} 1915}
1855 1916
1856static void vmx_flush_tlb(struct kvm_vcpu *vcpu) 1917static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
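
The reworked vmx_vcpu_run() above settles on a return-code convention: kvm_handle_exit() returns a positive value to resume the guest, zero to exit to userspace, and post_kvm_run_save() mirrors state out on every return path. A compressed sketch of that loop shape, with all kernel calls replaced by hypothetical stubs:

#include <errno.h>
#include <stdbool.h>

static int  handle_exit(void)       { return 0; } /* stub: >0 resume, 0 to user */
static bool signal_is_pending(void) { return false; }
static bool irq_window_wanted(void) { return false; }
static void save_run_state(void)    { }           /* stands in for post_kvm_run_save() */

static int run_loop(void)
{
	int r;

	for (;;) {
		r = handle_exit();
		if (r <= 0)
			break;                    /* 0: to userspace, <0: error */
		if (signal_is_pending() || irq_window_wanted()) {
			r = -EINTR;               /* let userspace act */
			break;
		}
		/* otherwise reschedule if needed and re-enter the guest */
	}
	save_run_state();                         /* mirror state out on every return */
	return r;
}
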
@@ -1906,13 +1967,33 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
1906{ 1967{
1907 struct vmcs *vmcs; 1968 struct vmcs *vmcs;
1908 1969
1970 vcpu->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1971 if (!vcpu->guest_msrs)
1972 return -ENOMEM;
1973
1974 vcpu->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
1975 if (!vcpu->host_msrs)
1976 goto out_free_guest_msrs;
1977
1909 vmcs = alloc_vmcs(); 1978 vmcs = alloc_vmcs();
1910 if (!vmcs) 1979 if (!vmcs)
1911 return -ENOMEM; 1980 goto out_free_msrs;
1981
1912 vmcs_clear(vmcs); 1982 vmcs_clear(vmcs);
1913 vcpu->vmcs = vmcs; 1983 vcpu->vmcs = vmcs;
1914 vcpu->launched = 0; 1984 vcpu->launched = 0;
1985
1915 return 0; 1986 return 0;
1987
1988out_free_msrs:
1989 kfree(vcpu->host_msrs);
1990 vcpu->host_msrs = NULL;
1991
1992out_free_guest_msrs:
1993 kfree(vcpu->guest_msrs);
1994 vcpu->guest_msrs = NULL;
1995
1996 return -ENOMEM;
1916} 1997}
1917 1998
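
The new error path in vmx_create_vcpu() is the standard goto-unwind idiom: free in reverse order of allocation, with later labels falling through to earlier ones. A self-contained sketch of the same shape (plain malloc/free in place of kmalloc/kfree):

#include <stdlib.h>

struct bufs { void *guest_msrs, *host_msrs, *vmcs; };

static int create_bufs(struct bufs *b, size_t sz)
{
	b->guest_msrs = malloc(sz);
	if (!b->guest_msrs)
		return -1;

	b->host_msrs = malloc(sz);
	if (!b->host_msrs)
		goto out_free_guest;

	b->vmcs = malloc(sz);
	if (!b->vmcs)
		goto out_free_host;
	return 0;

out_free_host:			/* falls through: undo in reverse order */
	free(b->host_msrs);
	b->host_msrs = NULL;
out_free_guest:
	free(b->guest_msrs);
	b->guest_msrs = NULL;
	return -1;
}
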
1918static struct kvm_arch_ops vmx_arch_ops = { 1999static struct kvm_arch_ops vmx_arch_ops = {
@@ -1936,6 +2017,7 @@ static struct kvm_arch_ops vmx_arch_ops = {
1936 .get_segment = vmx_get_segment, 2017 .get_segment = vmx_get_segment,
1937 .set_segment = vmx_set_segment, 2018 .set_segment = vmx_set_segment,
1938 .get_cs_db_l_bits = vmx_get_cs_db_l_bits, 2019 .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
2020 .decache_cr0_cr4_guest_bits = vmx_decache_cr0_cr4_guest_bits,
1939 .set_cr0 = vmx_set_cr0, 2021 .set_cr0 = vmx_set_cr0,
1940 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch, 2022 .set_cr0_no_modeswitch = vmx_set_cr0_no_modeswitch,
1941 .set_cr3 = vmx_set_cr3, 2023 .set_cr3 = vmx_set_cr3,
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 1bff3e925fda..be70795b4822 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -1323,7 +1323,7 @@ twobyte_special_insn:
1323 ctxt)) != 0)) 1323 ctxt)) != 0))
1324 goto done; 1324 goto done;
1325 if ((old_lo != _regs[VCPU_REGS_RAX]) 1325 if ((old_lo != _regs[VCPU_REGS_RAX])
1326 || (old_hi != _regs[VCPU_REGS_RDI])) { 1326 || (old_hi != _regs[VCPU_REGS_RDX])) {
1327 _regs[VCPU_REGS_RAX] = old_lo; 1327 _regs[VCPU_REGS_RAX] = old_lo;
1328 _regs[VCPU_REGS_RDX] = old_hi; 1328 _regs[VCPU_REGS_RDX] = old_hi;
1329 _eflags &= ~EFLG_ZF; 1329 _eflags &= ~EFLG_ZF;
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index fb1edc1c9edb..50914439d861 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -16,7 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/leds.h> 17#include <linux/leds.h>
18 18
19#include <asm/arch/hardware.h> 19#include <asm/hardware.h>
20#include <asm/arch/regs-gpio.h> 20#include <asm/arch/regs-gpio.h>
21#include <asm/arch/leds-gpio.h> 21#include <asm/arch/leds-gpio.h>
22 22
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c8558d4ed506..8ca75e52f637 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -44,6 +44,7 @@
44#include <linux/sysdev.h> 44#include <linux/sysdev.h>
45#include <linux/freezer.h> 45#include <linux/freezer.h>
46#include <linux/syscalls.h> 46#include <linux/syscalls.h>
47#include <linux/suspend.h>
47#include <linux/cpu.h> 48#include <linux/cpu.h>
48#include <asm/prom.h> 49#include <asm/prom.h>
49#include <asm/machdep.h> 50#include <asm/machdep.h>
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b30f74be3982..164b25dca101 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -775,6 +775,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
775 struct bio_list bl; 775 struct bio_list bl;
776 struct page **behind_pages = NULL; 776 struct page **behind_pages = NULL;
777 const int rw = bio_data_dir(bio); 777 const int rw = bio_data_dir(bio);
778 const int do_sync = bio_sync(bio);
778 int do_barriers; 779 int do_barriers;
779 780
780 /* 781 /*
@@ -835,7 +836,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
835 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 836 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
836 read_bio->bi_bdev = mirror->rdev->bdev; 837 read_bio->bi_bdev = mirror->rdev->bdev;
837 read_bio->bi_end_io = raid1_end_read_request; 838 read_bio->bi_end_io = raid1_end_read_request;
838 read_bio->bi_rw = READ; 839 read_bio->bi_rw = READ | do_sync;
839 read_bio->bi_private = r1_bio; 840 read_bio->bi_private = r1_bio;
840 841
841 generic_make_request(read_bio); 842 generic_make_request(read_bio);
@@ -906,7 +907,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
906 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 907 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
907 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 908 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
908 mbio->bi_end_io = raid1_end_write_request; 909 mbio->bi_end_io = raid1_end_write_request;
909 mbio->bi_rw = WRITE | do_barriers; 910 mbio->bi_rw = WRITE | do_barriers | do_sync;
910 mbio->bi_private = r1_bio; 911 mbio->bi_private = r1_bio;
911 912
912 if (behind_pages) { 913 if (behind_pages) {
@@ -941,6 +942,8 @@ static int make_request(request_queue_t *q, struct bio * bio)
941 blk_plug_device(mddev->queue); 942 blk_plug_device(mddev->queue);
942 spin_unlock_irqrestore(&conf->device_lock, flags); 943 spin_unlock_irqrestore(&conf->device_lock, flags);
943 944
945 if (do_sync)
946 md_wakeup_thread(mddev->thread);
944#if 0 947#if 0
945 while ((bio = bio_list_pop(&bl)) != NULL) 948 while ((bio = bio_list_pop(&bl)) != NULL)
946 generic_make_request(bio); 949 generic_make_request(bio);
@@ -1541,6 +1544,7 @@ static void raid1d(mddev_t *mddev)
1541 * We already have a nr_pending reference on these rdevs. 1544 * We already have a nr_pending reference on these rdevs.
1542 */ 1545 */
1543 int i; 1546 int i;
1547 const int do_sync = bio_sync(r1_bio->master_bio);
1544 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1548 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1545 clear_bit(R1BIO_Barrier, &r1_bio->state); 1549 clear_bit(R1BIO_Barrier, &r1_bio->state);
1546 for (i=0; i < conf->raid_disks; i++) 1550 for (i=0; i < conf->raid_disks; i++)
@@ -1561,7 +1565,7 @@ static void raid1d(mddev_t *mddev)
1561 conf->mirrors[i].rdev->data_offset; 1565 conf->mirrors[i].rdev->data_offset;
1562 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1566 bio->bi_bdev = conf->mirrors[i].rdev->bdev;
1563 bio->bi_end_io = raid1_end_write_request; 1567 bio->bi_end_io = raid1_end_write_request;
1564 bio->bi_rw = WRITE; 1568 bio->bi_rw = WRITE | do_sync;
1565 bio->bi_private = r1_bio; 1569 bio->bi_private = r1_bio;
1566 r1_bio->bios[i] = bio; 1570 r1_bio->bios[i] = bio;
1567 generic_make_request(bio); 1571 generic_make_request(bio);
@@ -1593,6 +1597,7 @@ static void raid1d(mddev_t *mddev)
1593 (unsigned long long)r1_bio->sector); 1597 (unsigned long long)r1_bio->sector);
1594 raid_end_bio_io(r1_bio); 1598 raid_end_bio_io(r1_bio);
1595 } else { 1599 } else {
1600 const int do_sync = bio_sync(r1_bio->master_bio);
1596 r1_bio->bios[r1_bio->read_disk] = 1601 r1_bio->bios[r1_bio->read_disk] =
1597 mddev->ro ? IO_BLOCKED : NULL; 1602 mddev->ro ? IO_BLOCKED : NULL;
1598 r1_bio->read_disk = disk; 1603 r1_bio->read_disk = disk;
@@ -1608,7 +1613,7 @@ static void raid1d(mddev_t *mddev)
1608 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1613 bio->bi_sector = r1_bio->sector + rdev->data_offset;
1609 bio->bi_bdev = rdev->bdev; 1614 bio->bi_bdev = rdev->bdev;
1610 bio->bi_end_io = raid1_end_read_request; 1615 bio->bi_end_io = raid1_end_read_request;
1611 bio->bi_rw = READ; 1616 bio->bi_rw = READ | do_sync;
1612 bio->bi_private = r1_bio; 1617 bio->bi_private = r1_bio;
1613 unplug = 1; 1618 unplug = 1;
1614 generic_make_request(bio); 1619 generic_make_request(bio);
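
The raid1 hunks above (and the raid10 hunks that follow) all apply one pattern: sample the sync hint from the incoming master bio once, then OR it into every bio cloned from it, so a SYNC request stays SYNC across mirrors and read-error retries. A minimal sketch, with the flag value hypothetical:

struct bio { unsigned long bi_rw; };

#define RW_SYNC (1UL << 4)	/* hypothetical bit, stands in for the bio_sync() hint */

static void set_child_rw(struct bio *child, const struct bio *master,
			 unsigned long dir)
{
	unsigned long do_sync = master->bi_rw & RW_SYNC;

	child->bi_rw = dir | do_sync;	/* READ or WRITE, plus the inherited hint */
}
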
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f0141910bb8d..a9401c017e35 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -782,6 +782,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
782 int i; 782 int i;
783 int chunk_sects = conf->chunk_mask + 1; 783 int chunk_sects = conf->chunk_mask + 1;
784 const int rw = bio_data_dir(bio); 784 const int rw = bio_data_dir(bio);
785 const int do_sync = bio_sync(bio);
785 struct bio_list bl; 786 struct bio_list bl;
786 unsigned long flags; 787 unsigned long flags;
787 788
@@ -863,7 +864,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
863 mirror->rdev->data_offset; 864 mirror->rdev->data_offset;
864 read_bio->bi_bdev = mirror->rdev->bdev; 865 read_bio->bi_bdev = mirror->rdev->bdev;
865 read_bio->bi_end_io = raid10_end_read_request; 866 read_bio->bi_end_io = raid10_end_read_request;
866 read_bio->bi_rw = READ; 867 read_bio->bi_rw = READ | do_sync;
867 read_bio->bi_private = r10_bio; 868 read_bio->bi_private = r10_bio;
868 869
869 generic_make_request(read_bio); 870 generic_make_request(read_bio);
@@ -909,7 +910,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
909 conf->mirrors[d].rdev->data_offset; 910 conf->mirrors[d].rdev->data_offset;
910 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 911 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
911 mbio->bi_end_io = raid10_end_write_request; 912 mbio->bi_end_io = raid10_end_write_request;
912 mbio->bi_rw = WRITE; 913 mbio->bi_rw = WRITE | do_sync;
913 mbio->bi_private = r10_bio; 914 mbio->bi_private = r10_bio;
914 915
915 atomic_inc(&r10_bio->remaining); 916 atomic_inc(&r10_bio->remaining);
@@ -922,6 +923,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
922 blk_plug_device(mddev->queue); 923 blk_plug_device(mddev->queue);
923 spin_unlock_irqrestore(&conf->device_lock, flags); 924 spin_unlock_irqrestore(&conf->device_lock, flags);
924 925
926 if (do_sync)
927 md_wakeup_thread(mddev->thread);
928
925 return 0; 929 return 0;
926} 930}
927 931
@@ -1563,6 +1567,7 @@ static void raid10d(mddev_t *mddev)
1563 (unsigned long long)r10_bio->sector); 1567 (unsigned long long)r10_bio->sector);
1564 raid_end_bio_io(r10_bio); 1568 raid_end_bio_io(r10_bio);
1565 } else { 1569 } else {
1570 const int do_sync = bio_sync(r10_bio->master_bio);
1566 rdev = conf->mirrors[mirror].rdev; 1571 rdev = conf->mirrors[mirror].rdev;
1567 if (printk_ratelimit()) 1572 if (printk_ratelimit())
1568 printk(KERN_ERR "raid10: %s: redirecting sector %llu to" 1573 printk(KERN_ERR "raid10: %s: redirecting sector %llu to"
@@ -1574,7 +1579,7 @@ static void raid10d(mddev_t *mddev)
1574 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr 1579 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
1575 + rdev->data_offset; 1580 + rdev->data_offset;
1576 bio->bi_bdev = rdev->bdev; 1581 bio->bi_bdev = rdev->bdev;
1577 bio->bi_rw = READ; 1582 bio->bi_rw = READ | do_sync;
1578 bio->bi_private = r10_bio; 1583 bio->bi_private = r10_bio;
1579 bio->bi_end_io = raid10_end_read_request; 1584 bio->bi_end_io = raid10_end_read_request;
1580 unplug = 1; 1585 unplug = 1;
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 3482e0114d43..2bd84d351a18 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -38,6 +38,7 @@
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/errno.h> 40#include <linux/errno.h>
41#include <linux/freezer.h>
41#include <linux/kernel.h> 42#include <linux/kernel.h>
42#include <linux/slab.h> 43#include <linux/slab.h>
43#include <linux/mm.h> 44#include <linux/mm.h>
@@ -961,6 +962,7 @@ int cx88_audio_thread(void *data)
961 msleep_interruptible(1000); 962 msleep_interruptible(1000);
962 if (kthread_should_stop()) 963 if (kthread_should_stop())
963 break; 964 break;
965 try_to_freeze();
964 966
965 /* just monitor the audio status for now ... */ 967 /* just monitor the audio status for now ... */
966 memset(&t, 0, sizeof(t)); 968 memset(&t, 0, sizeof(t));
diff --git a/drivers/media/video/ks0127.c b/drivers/media/video/ks0127.c
index c1a377f797d9..b6cd21e6dab9 100644
--- a/drivers/media/video/ks0127.c
+++ b/drivers/media/video/ks0127.c
@@ -712,13 +712,13 @@ static int ks0127_command(struct i2c_client *client,
712 *iarg = 0; 712 *iarg = 0;
713 status = ks0127_read(ks, KS_STAT); 713 status = ks0127_read(ks, KS_STAT);
714 if (!(status & 0x20)) /* NOVID not set */ 714 if (!(status & 0x20)) /* NOVID not set */
715 *iarg = (*iarg & DECODER_STATUS_GOOD); 715 *iarg = (*iarg | DECODER_STATUS_GOOD);
716 if ((status & 0x01)) /* CLOCK set */ 716 if ((status & 0x01)) /* CLOCK set */
717 *iarg = (*iarg & DECODER_STATUS_COLOR); 717 *iarg = (*iarg | DECODER_STATUS_COLOR);
718 if ((status & 0x08)) /* PALDET set */ 718 if ((status & 0x08)) /* PALDET set */
719 *iarg = (*iarg & DECODER_STATUS_PAL); 719 *iarg = (*iarg | DECODER_STATUS_PAL);
720 else 720 else
721 *iarg = (*iarg & DECODER_STATUS_NTSC); 721 *iarg = (*iarg | DECODER_STATUS_NTSC);
722 break; 722 break;
723 723
724 //Catch any unknown command 724 //Catch any unknown command
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 4dead84aff46..ae984bbe36b6 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -2570,6 +2570,7 @@ struct saa7134_board saa7134_boards[] = {
2570 .radio_type = UNSET, 2570 .radio_type = UNSET,
2571 .tuner_addr = ADDR_UNSET, 2571 .tuner_addr = ADDR_UNSET,
2572 .radio_addr = ADDR_UNSET, 2572 .radio_addr = ADDR_UNSET,
2573 .gpiomask = 1 << 21,
2573 .inputs = {{ 2574 .inputs = {{
2574 .name = name_tv, 2575 .name = name_tv,
2575 .vmux = 1, 2576 .vmux = 1,
@@ -2578,15 +2579,20 @@ struct saa7134_board saa7134_boards[] = {
2578 },{ 2579 },{
2579 .name = name_comp1, 2580 .name = name_comp1,
2580 .vmux = 3, 2581 .vmux = 3,
2581 .amux = LINE1, 2582 .amux = LINE2, /* unconfirmed, taken from Philips driver */
2583 },{
2584 .name = name_comp2,
2585 .vmux = 0, /* untested, Composite over S-Video */
2586 .amux = LINE2,
2582 },{ 2587 },{
2583 .name = name_svideo, 2588 .name = name_svideo,
2584 .vmux = 0, 2589 .vmux = 8,
2585 .amux = LINE1, 2590 .amux = LINE2,
2586 }}, 2591 }},
2587 .radio = { 2592 .radio = {
2588 .name = name_radio, 2593 .name = name_radio,
2589 .amux = LINE1, 2594 .amux = TV,
2595 .gpio = 0x0200000,
2590 }, 2596 },
2591 }, 2597 },
2592 [SAA7134_BOARD_CINERGY250PCI] = { 2598 [SAA7134_BOARD_CINERGY250PCI] = {
diff --git a/drivers/media/video/tveeprom.c b/drivers/media/video/tveeprom.c
index 2624e3f7dd29..4e7c1fa668d3 100644
--- a/drivers/media/video/tveeprom.c
+++ b/drivers/media/video/tveeprom.c
@@ -184,7 +184,7 @@ hauppauge_tuner[] =
184 { TUNER_ABSENT, "Thompson DTT757"}, 184 { TUNER_ABSENT, "Thompson DTT757"},
185 /* 80-89 */ 185 /* 80-89 */
186 { TUNER_ABSENT, "Philips FQ1216LME MK3"}, 186 { TUNER_ABSENT, "Philips FQ1216LME MK3"},
187 { TUNER_ABSENT, "LG TAPC G701D"}, 187 { TUNER_LG_PAL_NEW_TAPC, "LG TAPC G701D"},
188 { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"}, 188 { TUNER_LG_NTSC_NEW_TAPC, "LG TAPC H791F"},
189 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"}, 189 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MB 3"},
190 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"}, 190 { TUNER_LG_PAL_NEW_TAPC, "TCL 2002MI 3"},
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.h b/drivers/media/video/usbvideo/quickcam_messenger.h
index baab9c081b52..17ace394d981 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.h
+++ b/drivers/media/video/usbvideo/quickcam_messenger.h
@@ -35,27 +35,13 @@ struct rgb {
35}; 35};
36 36
37struct bayL0 { 37struct bayL0 {
38#ifdef __BIG_ENDIAN
39 u8 r;
40 u8 g;
41#elif __LITTLE_ENDIAN
42 u8 g; 38 u8 g;
43 u8 r; 39 u8 r;
44#else
45#error not byte order defined
46#endif
47}; 40};
48 41
49struct bayL1 { 42struct bayL1 {
50#ifdef __BIG_ENDIAN
51 u8 g;
52 u8 b;
53#elif __LITTLE_ENDIAN
54 u8 b; 43 u8 b;
55 u8 g; 44 u8 g;
56#else
57#error not byte order defined
58#endif
59}; 45};
60 46
61struct cam_size { 47struct cam_size {
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 8c7eba2a728e..7243337b771a 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1080,7 +1080,6 @@ static ssize_t usbvision_v4l2_read(struct file *file, char *buf,
1080 int noblock = file->f_flags & O_NONBLOCK; 1080 int noblock = file->f_flags & O_NONBLOCK;
1081 unsigned long lock_flags; 1081 unsigned long lock_flags;
1082 1082
1083 int frmx = -1;
1084 int ret,i; 1083 int ret,i;
1085 struct usbvision_frame *frame; 1084 struct usbvision_frame *frame;
1086 1085
@@ -1155,7 +1154,7 @@ static ssize_t usbvision_v4l2_read(struct file *file, char *buf,
1155 frame->bytes_read = 0; 1154 frame->bytes_read = 0;
1156 1155
1157 /* Mark it as available to be used again. */ 1156 /* Mark it as available to be used again. */
1158 usbvision->frame[frmx].grabstate = FrameState_Unused; 1157 frame->grabstate = FrameState_Unused;
1159/* } */ 1158/* } */
1160 1159
1161 return count; 1160 return count;
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 752c82c37f55..b87d571e0463 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -90,8 +90,15 @@ MODULE_LICENSE("GPL");
90char *v4l2_norm_to_name(v4l2_std_id id) 90char *v4l2_norm_to_name(v4l2_std_id id)
91{ 91{
92 char *name; 92 char *name;
93 u32 myid = id;
93 94
94 switch (id) { 95 /* HACK: the ppc32 architecture doesn't have a __ucmpdi2 function to handle
96 64-bit comparisons. So, on that architecture, with some gcc variants,
97 compilation fails. Currently, the max value is 30 bits wide.
98 */
99 BUG_ON(myid != id);
100
101 switch (myid) {
95 case V4L2_STD_PAL: 102 case V4L2_STD_PAL:
96 name="PAL"; break; 103 name="PAL"; break;
97 case V4L2_STD_PAL_BG: 104 case V4L2_STD_PAL_BG:
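
The workaround above sidesteps a toolchain gap rather than a logic bug: on ppc32 a switch over a 64-bit value may need the libgcc helper __ucmpdi2, so the value is narrowed to 32 bits first and a BUG_ON() guards against truncation. The same shape in portable C, with the case values purely illustrative:

#include <stdint.h>
#include <assert.h>

static const char *norm_name(uint64_t id)
{
	uint32_t myid = (uint32_t)id;

	assert((uint64_t)myid == id);	/* all defined values fit in 32 bits */
	switch (myid) {
	case 0x1: return "PAL";		/* hypothetical values, for shape only */
	case 0x2: return "NTSC";
	default:  return "Unknown";
	}
}
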
diff --git a/drivers/media/video/video-buf.c b/drivers/media/video/video-buf.c
index f429f49901b9..635d102c86f0 100644
--- a/drivers/media/video/video-buf.c
+++ b/drivers/media/video/video-buf.c
@@ -1229,7 +1229,7 @@ videobuf_vm_nopage(struct vm_area_struct *vma, unsigned long vaddr,
1229 vaddr,vma->vm_start,vma->vm_end); 1229 vaddr,vma->vm_start,vma->vm_end);
1230 if (vaddr > vma->vm_end) 1230 if (vaddr > vma->vm_end)
1231 return NOPAGE_SIGBUS; 1231 return NOPAGE_SIGBUS;
1232 page = alloc_page(GFP_USER); 1232 page = alloc_page(GFP_USER | __GFP_DMA32);
1233 if (!page) 1233 if (!page)
1234 return NOPAGE_OOM; 1234 return NOPAGE_OOM;
1235 clear_user_page(page_address(page), vaddr, page); 1235 clear_user_page(page_address(page), vaddr, page);
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index bacb311b4f24..d4cf55666731 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -270,10 +270,15 @@ static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
270 char *p,*s,*basep; 270 char *p,*s,*basep;
271 struct page *pg; 271 struct page *pg;
272 u8 chr,r,g,b,color; 272 u8 chr,r,g,b,color;
273 unsigned long flags;
274 spinlock_t spinlock;
275
276 spin_lock_init(&spinlock);
273 277
274 /* Get first addr pointed to pixel position */ 278 /* Get first addr pointed to pixel position */
275 oldpg=get_addr_pos(pos,pages,to_addr); 279 oldpg=get_addr_pos(pos,pages,to_addr);
276 pg=pfn_to_page(sg_dma_address(to_addr[oldpg].sg) >> PAGE_SHIFT); 280 pg=pfn_to_page(sg_dma_address(to_addr[oldpg].sg) >> PAGE_SHIFT);
281 spin_lock_irqsave(&spinlock,flags);
277 basep = kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[oldpg].sg->offset; 282 basep = kmap_atomic(pg, KM_BOUNCE_READ)+to_addr[oldpg].sg->offset;
278 283
279 /* We will just duplicate the second pixel at the packet */ 284 /* We will just duplicate the second pixel at the packet */
@@ -376,6 +381,8 @@ static void gen_line(struct sg_to_addr to_addr[],int inipos,int pages,int wmax,
376 381
377end: 382end:
378 kunmap_atomic(basep, KM_BOUNCE_READ); 383 kunmap_atomic(basep, KM_BOUNCE_READ);
384 spin_unlock_irqrestore(&spinlock,flags);
385
379} 386}
380static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf) 387static void vivi_fillbuff(struct vivi_dev *dev,struct vivi_buffer *buf)
381{ 388{
diff --git a/drivers/mmc/at91_mci.c b/drivers/mmc/at91_mci.c
index 08a33c33f6ed..aa152f31851e 100644
--- a/drivers/mmc/at91_mci.c
+++ b/drivers/mmc/at91_mci.c
@@ -768,7 +768,7 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
768 return IRQ_HANDLED; 768 return IRQ_HANDLED;
769} 769}
770 770
771int at91_mci_get_ro(struct mmc_host *mmc) 771static int at91_mci_get_ro(struct mmc_host *mmc)
772{ 772{
773 int read_only = 0; 773 int read_only = 0;
774 struct at91mci_host *host = mmc_priv(mmc); 774 struct at91mci_host *host = mmc_priv(mmc);
@@ -794,7 +794,7 @@ static const struct mmc_host_ops at91_mci_ops = {
794/* 794/*
795 * Probe for the device 795 * Probe for the device
796 */ 796 */
797static int at91_mci_probe(struct platform_device *pdev) 797static int __init at91_mci_probe(struct platform_device *pdev)
798{ 798{
799 struct mmc_host *mmc; 799 struct mmc_host *mmc;
800 struct at91mci_host *host; 800 struct at91mci_host *host;
@@ -910,7 +910,7 @@ static int at91_mci_probe(struct platform_device *pdev)
910/* 910/*
911 * Remove a device 911 * Remove a device
912 */ 912 */
913static int at91_mci_remove(struct platform_device *pdev) 913static int __exit at91_mci_remove(struct platform_device *pdev)
914{ 914{
915 struct mmc_host *mmc = platform_get_drvdata(pdev); 915 struct mmc_host *mmc = platform_get_drvdata(pdev);
916 struct at91mci_host *host; 916 struct at91mci_host *host;
@@ -972,8 +972,7 @@ static int at91_mci_resume(struct platform_device *pdev)
972#endif 972#endif
973 973
974static struct platform_driver at91_mci_driver = { 974static struct platform_driver at91_mci_driver = {
975 .probe = at91_mci_probe, 975 .remove = __exit_p(at91_mci_remove),
976 .remove = at91_mci_remove,
977 .suspend = at91_mci_suspend, 976 .suspend = at91_mci_suspend,
978 .resume = at91_mci_resume, 977 .resume = at91_mci_resume,
979 .driver = { 978 .driver = {
@@ -984,7 +983,7 @@ static struct platform_driver at91_mci_driver = {
984 983
985static int __init at91_mci_init(void) 984static int __init at91_mci_init(void)
986{ 985{
987 return platform_driver_register(&at91_mci_driver); 986 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
988} 987}
989 988
990static void __exit at91_mci_exit(void) 989static void __exit at91_mci_exit(void)
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c
index 06e7fcd19221..bfb9ff693208 100644
--- a/drivers/mmc/imxmmc.c
+++ b/drivers/mmc/imxmmc.c
@@ -351,9 +351,6 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd,
351 case MMC_RSP_R3: /* short */ 351 case MMC_RSP_R3: /* short */
352 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; 352 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
353 break; 353 break;
354 case MMC_RSP_R6: /* short CRC */
355 cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R6;
356 break;
357 default: 354 default:
358 break; 355 break;
359 } 356 }
diff --git a/drivers/mmc/mmci.c b/drivers/mmc/mmci.c
index e9b80e920266..ccfe6561be24 100644
--- a/drivers/mmc/mmci.c
+++ b/drivers/mmc/mmci.c
@@ -42,6 +42,8 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
42{ 42{
43 writel(0, host->base + MMCICOMMAND); 43 writel(0, host->base + MMCICOMMAND);
44 44
45 BUG_ON(host->data);
46
45 host->mrq = NULL; 47 host->mrq = NULL;
46 host->cmd = NULL; 48 host->cmd = NULL;
47 49
@@ -198,6 +200,8 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
198 } 200 }
199 201
200 if (!cmd->data || cmd->error != MMC_ERR_NONE) { 202 if (!cmd->data || cmd->error != MMC_ERR_NONE) {
203 if (host->data)
204 mmci_stop_data(host);
201 mmci_request_end(host, cmd->mrq); 205 mmci_request_end(host, cmd->mrq);
202 } else if (!(cmd->data->flags & MMC_DATA_READ)) { 206 } else if (!(cmd->data->flags & MMC_DATA_READ)) {
203 mmci_start_data(host, cmd->data); 207 mmci_start_data(host, cmd->data);
diff --git a/drivers/mmc/omap.c b/drivers/mmc/omap.c
index 435d331e772a..d30540b27614 100644
--- a/drivers/mmc/omap.c
+++ b/drivers/mmc/omap.c
@@ -91,7 +91,6 @@
91 91
92 92
93#define DRIVER_NAME "mmci-omap" 93#define DRIVER_NAME "mmci-omap"
94#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
95 94
96/* Specifies how often in millisecs to poll for card status changes 95/* Specifies how often in millisecs to poll for card status changes
97 * when the cover switch is open */ 96 * when the cover switch is open */
@@ -204,18 +203,22 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
204 cmdtype = 0; 203 cmdtype = 0;
205 204
206 /* Our hardware needs to know exact type */ 205 /* Our hardware needs to know exact type */
207 switch (RSP_TYPE(mmc_resp_type(cmd))) { 206 switch (mmc_resp_type(cmd)) {
208 case RSP_TYPE(MMC_RSP_R1): 207 case MMC_RSP_NONE:
209 /* resp 1, resp 1b */ 208 break;
209 case MMC_RSP_R1:
210 case MMC_RSP_R1B:
211 /* resp 1, 1b, 6, 7 */
210 resptype = 1; 212 resptype = 1;
211 break; 213 break;
212 case RSP_TYPE(MMC_RSP_R2): 214 case MMC_RSP_R2:
213 resptype = 2; 215 resptype = 2;
214 break; 216 break;
215 case RSP_TYPE(MMC_RSP_R3): 217 case MMC_RSP_R3:
216 resptype = 3; 218 resptype = 3;
217 break; 219 break;
218 default: 220 default:
221 dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
219 break; 222 break;
220 } 223 }
221 224
@@ -581,9 +584,9 @@ static void mmc_omap_switch_timer(unsigned long arg)
581 schedule_work(&host->switch_work); 584 schedule_work(&host->switch_work);
582} 585}
583 586
584static void mmc_omap_switch_handler(void *data) 587static void mmc_omap_switch_handler(struct work_struct *work)
585{ 588{
586 struct mmc_omap_host *host = (struct mmc_omap_host *) data; 589 struct mmc_omap_host *host = container_of(work, struct mmc_omap_host, switch_work);
587 struct mmc_card *card; 590 struct mmc_card *card;
588 static int complained = 0; 591 static int complained = 0;
589 int cards = 0, cover_open; 592 int cards = 0, cover_open;
@@ -1116,7 +1119,7 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1116 platform_set_drvdata(pdev, host); 1119 platform_set_drvdata(pdev, host);
1117 1120
1118 if (host->switch_pin >= 0) { 1121 if (host->switch_pin >= 0) {
1119 INIT_WORK(&host->switch_work, mmc_omap_switch_handler, host); 1122 INIT_WORK(&host->switch_work, mmc_omap_switch_handler);
1120 init_timer(&host->switch_timer); 1123 init_timer(&host->switch_timer);
1121 host->switch_timer.function = mmc_omap_switch_timer; 1124 host->switch_timer.function = mmc_omap_switch_timer;
1122 host->switch_timer.data = (unsigned long) host; 1125 host->switch_timer.data = (unsigned long) host;
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index 45a9283ce498..6073d998b11f 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -171,7 +171,7 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
171 171
172#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE)) 172#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
173 switch (RSP_TYPE(mmc_resp_type(cmd))) { 173 switch (RSP_TYPE(mmc_resp_type(cmd))) {
174 case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6 */ 174 case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
175 cmdat |= CMDAT_RESP_SHORT; 175 cmdat |= CMDAT_RESP_SHORT;
176 break; 176 break;
177 case RSP_TYPE(MMC_RSP_R3): 177 case RSP_TYPE(MMC_RSP_R3):
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
index f18ad998b3cb..fa4a52886b97 100644
--- a/drivers/mmc/tifm_sd.c
+++ b/drivers/mmc/tifm_sd.c
@@ -173,9 +173,6 @@ static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
173 case MMC_RSP_R3: 173 case MMC_RSP_R3:
174 rc |= TIFM_MMCSD_RSP_R3; 174 rc |= TIFM_MMCSD_RSP_R3;
175 break; 175 break;
176 case MMC_RSP_R6:
177 rc |= TIFM_MMCSD_RSP_R6;
178 break;
179 default: 176 default:
180 BUG(); 177 BUG();
181 } 178 }
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 602ed31a5dd9..9305eb9b1b98 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -349,22 +349,11 @@ static void __init trif_probe2(int unit)
349#endif 349#endif
350 350
351 351
352/*
353 * The loopback device is global so it can be directly referenced
354 * by the network code. Also, it must be first on device list.
355 */
356extern int loopback_init(void);
357
358/* Statically configured drivers -- order matters here. */ 352/* Statically configured drivers -- order matters here. */
359static int __init net_olddevs_init(void) 353static int __init net_olddevs_init(void)
360{ 354{
361 int num; 355 int num;
362 356
363 if (loopback_init()) {
364 printk(KERN_ERR "Network loopback device setup failed\n");
365 }
366
367
368#ifdef CONFIG_SBNI 357#ifdef CONFIG_SBNI
369 for (num = 0; num < 8; ++num) 358 for (num = 0; num < 8; ++num)
370 sbni_probe(num); 359 sbni_probe(num);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index ada5e9b9988c..ca5acc4736df 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.5.2" 60#define DRV_MODULE_VERSION "1.5.3"
61#define DRV_MODULE_RELDATE "December 13, 2006" 61#define DRV_MODULE_RELDATE "January 8, 2007"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -1345,8 +1345,6 @@ bnx2_init_copper_phy(struct bnx2 *bp)
1345{ 1345{
1346 u32 val; 1346 u32 val;
1347 1347
1348 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1349
1350 if (bp->phy_flags & PHY_CRC_FIX_FLAG) { 1348 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1351 bnx2_write_phy(bp, 0x18, 0x0c00); 1349 bnx2_write_phy(bp, 0x18, 0x0c00);
1352 bnx2_write_phy(bp, 0x17, 0x000a); 1350 bnx2_write_phy(bp, 0x17, 0x000a);
@@ -3085,7 +3083,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3085 int buf_size) 3083 int buf_size)
3086{ 3084{
3087 u32 written, offset32, len32; 3085 u32 written, offset32, len32;
3088 u8 *buf, start[4], end[4], *flash_buffer = NULL; 3086 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3089 int rc = 0; 3087 int rc = 0;
3090 int align_start, align_end; 3088 int align_start, align_end;
3091 3089
@@ -3113,16 +3111,17 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3113 } 3111 }
3114 3112
3115 if (align_start || align_end) { 3113 if (align_start || align_end) {
3116 buf = kmalloc(len32, GFP_KERNEL); 3114 align_buf = kmalloc(len32, GFP_KERNEL);
3117 if (buf == NULL) 3115 if (align_buf == NULL)
3118 return -ENOMEM; 3116 return -ENOMEM;
3119 if (align_start) { 3117 if (align_start) {
3120 memcpy(buf, start, 4); 3118 memcpy(align_buf, start, 4);
3121 } 3119 }
3122 if (align_end) { 3120 if (align_end) {
3123 memcpy(buf + len32 - 4, end, 4); 3121 memcpy(align_buf + len32 - 4, end, 4);
3124 } 3122 }
3125 memcpy(buf + align_start, data_buf, buf_size); 3123 memcpy(align_buf + align_start, data_buf, buf_size);
3124 buf = align_buf;
3126 } 3125 }
3127 3126
3128 if (bp->flash_info->buffered == 0) { 3127 if (bp->flash_info->buffered == 0) {
@@ -3256,11 +3255,8 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3256 } 3255 }
3257 3256
3258nvram_write_end: 3257nvram_write_end:
3259 if (bp->flash_info->buffered == 0) 3258 kfree(flash_buffer);
3260 kfree(flash_buffer); 3259 kfree(align_buf);
3261
3262 if (align_start || align_end)
3263 kfree(buf);
3264 return rc; 3260 return rc;
3265} 3261}
3266 3262
@@ -5645,6 +5641,44 @@ poll_bnx2(struct net_device *dev)
5645} 5641}
5646#endif 5642#endif
5647 5643
5644static void __devinit
5645bnx2_get_5709_media(struct bnx2 *bp)
5646{
5647 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5648 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5649 u32 strap;
5650
5651 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5652 return;
5653 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5654 bp->phy_flags |= PHY_SERDES_FLAG;
5655 return;
5656 }
5657
5658 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5659 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5660 else
5661 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5662
5663 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5664 switch (strap) {
5665 case 0x4:
5666 case 0x5:
5667 case 0x6:
5668 bp->phy_flags |= PHY_SERDES_FLAG;
5669 return;
5670 }
5671 } else {
5672 switch (strap) {
5673 case 0x1:
5674 case 0x2:
5675 case 0x4:
5676 bp->phy_flags |= PHY_SERDES_FLAG;
5677 return;
5678 }
5679 }
5680}
5681
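
bnx2_get_5709_media() decides copper vs. SERDES in two stages: the bond ID settles unambiguous parts outright, and otherwise a strap value (taken from the override field when set) is decoded against a per-PCI-function set of SERDES values. A sketch of just the strap decode, assuming the bond ID was inconclusive; the strap sets are copied from the switch statements above:

#include <stdbool.h>

static bool serdes_from_strap(unsigned int strap, int pci_func)
{
	if (pci_func == 0)
		return strap == 0x4 || strap == 0x5 || strap == 0x6;
	return strap == 0x1 || strap == 0x2 || strap == 0x4;
}
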
5648static int __devinit 5682static int __devinit
5649bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 5683bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5650{ 5684{
@@ -5865,10 +5899,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5865 bp->phy_addr = 1; 5899 bp->phy_addr = 1;
5866 5900
5867 /* Disable WOL support if we are running on a SERDES chip. */ 5901 /* Disable WOL support if we are running on a SERDES chip. */
5868 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5902 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5869 if (CHIP_BOND_ID(bp) != BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) 5903 bnx2_get_5709_media(bp);
5870 bp->phy_flags |= PHY_SERDES_FLAG; 5904 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5871 } else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
5872 bp->phy_flags |= PHY_SERDES_FLAG; 5905 bp->phy_flags |= PHY_SERDES_FLAG;
5873 5906
5874 if (bp->phy_flags & PHY_SERDES_FLAG) { 5907 if (bp->phy_flags & PHY_SERDES_FLAG) {
@@ -5880,7 +5913,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5880 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 5913 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5881 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG; 5914 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5882 } 5915 }
5883 } 5916 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
5917 CHIP_NUM(bp) == CHIP_NUM_5708)
5918 bp->phy_flags |= PHY_CRC_FIX_FLAG;
5884 5919
5885 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 5920 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5886 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 5921 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index c7731b6f9de3..82fed1dd5005 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -170,9 +170,10 @@ static struct cphy *my3126_phy_create(adapter_t *adapter,
170{ 170{
171 struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL); 171 struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
172 172
173 if (cphy) 173 if (!cphy)
174 cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); 174 return NULL;
175 175
176 cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
176 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); 177 INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
177 cphy->bmsr = 0; 178 cphy->bmsr = 0;
178 179
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4c1ff752048c..c6259c7127f6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -995,12 +995,6 @@ e1000_probe(struct pci_dev *pdev,
995 (adapter->hw.mac_type != e1000_82547)) 995 (adapter->hw.mac_type != e1000_82547))
996 netdev->features |= NETIF_F_TSO; 996 netdev->features |= NETIF_F_TSO;
997 997
998#ifdef CONFIG_DEBUG_SLAB
999 /* 82544's work arounds do not play nicely with DEBUG SLAB */
1000 if (adapter->hw.mac_type == e1000_82544)
1001 netdev->features &= ~NETIF_F_TSO;
1002#endif
1003
1004#ifdef NETIF_F_TSO6 998#ifdef NETIF_F_TSO6
1005 if (adapter->hw.mac_type > e1000_82547_rev_2) 999 if (adapter->hw.mac_type > e1000_82547_rev_2)
1006 netdev->features |= NETIF_F_TSO6; 1000 netdev->features |= NETIF_F_TSO6;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 2f48fe9a29a7..93f2b7a22160 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -234,6 +234,7 @@ enum {
234#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000 234#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
235#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000 235#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
236#define NVREG_XMITCTL_HOST_LOADED 0x00004000 236#define NVREG_XMITCTL_HOST_LOADED 0x00004000
237#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
237 NvRegTransmitterStatus = 0x088, 238 NvRegTransmitterStatus = 0x088,
238#define NVREG_XMITSTAT_BUSY 0x01 239#define NVREG_XMITSTAT_BUSY 0x01
239 240
@@ -249,6 +250,7 @@ enum {
249#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE 250#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
250 NvRegReceiverControl = 0x094, 251 NvRegReceiverControl = 0x094,
251#define NVREG_RCVCTL_START 0x01 252#define NVREG_RCVCTL_START 0x01
253#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
252 NvRegReceiverStatus = 0x98, 254 NvRegReceiverStatus = 0x98,
253#define NVREG_RCVSTAT_BUSY 0x01 255#define NVREG_RCVSTAT_BUSY 0x01
254 256
@@ -1169,16 +1171,21 @@ static void nv_start_rx(struct net_device *dev)
1169{ 1171{
1170 struct fe_priv *np = netdev_priv(dev); 1172 struct fe_priv *np = netdev_priv(dev);
1171 u8 __iomem *base = get_hwbase(dev); 1173 u8 __iomem *base = get_hwbase(dev);
1174 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1172 1175
1173 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 1176 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1174 /* Already running? Stop it. */ 1177 /* Already running? Stop it. */
1175 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) { 1178 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1176 writel(0, base + NvRegReceiverControl); 1179 rx_ctrl &= ~NVREG_RCVCTL_START;
1180 writel(rx_ctrl, base + NvRegReceiverControl);
1177 pci_push(base); 1181 pci_push(base);
1178 } 1182 }
1179 writel(np->linkspeed, base + NvRegLinkSpeed); 1183 writel(np->linkspeed, base + NvRegLinkSpeed);
1180 pci_push(base); 1184 pci_push(base);
1181 writel(NVREG_RCVCTL_START, base + NvRegReceiverControl); 1185 rx_ctrl |= NVREG_RCVCTL_START;
1186 if (np->mac_in_use)
1187 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1188 writel(rx_ctrl, base + NvRegReceiverControl);
1182 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n", 1189 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1183 dev->name, np->duplex, np->linkspeed); 1190 dev->name, np->duplex, np->linkspeed);
1184 pci_push(base); 1191 pci_push(base);
@@ -1186,39 +1193,59 @@ static void nv_start_rx(struct net_device *dev)
1186 1193
1187static void nv_stop_rx(struct net_device *dev) 1194static void nv_stop_rx(struct net_device *dev)
1188{ 1195{
1196 struct fe_priv *np = netdev_priv(dev);
1189 u8 __iomem *base = get_hwbase(dev); 1197 u8 __iomem *base = get_hwbase(dev);
1198 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1190 1199
1191 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name); 1200 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1192 writel(0, base + NvRegReceiverControl); 1201 if (!np->mac_in_use)
1202 rx_ctrl &= ~NVREG_RCVCTL_START;
1203 else
1204 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1205 writel(rx_ctrl, base + NvRegReceiverControl);
1193 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1206 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1194 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1207 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
1195 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1208 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
1196 1209
1197 udelay(NV_RXSTOP_DELAY2); 1210 udelay(NV_RXSTOP_DELAY2);
1198 writel(0, base + NvRegLinkSpeed); 1211 if (!np->mac_in_use)
1212 writel(0, base + NvRegLinkSpeed);
1199} 1213}
1200 1214
1201static void nv_start_tx(struct net_device *dev) 1215static void nv_start_tx(struct net_device *dev)
1202{ 1216{
1217 struct fe_priv *np = netdev_priv(dev);
1203 u8 __iomem *base = get_hwbase(dev); 1218 u8 __iomem *base = get_hwbase(dev);
1219 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1204 1220
1205 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name); 1221 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1206 writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl); 1222 tx_ctrl |= NVREG_XMITCTL_START;
1223 if (np->mac_in_use)
1224 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
1225 writel(tx_ctrl, base + NvRegTransmitterControl);
1207 pci_push(base); 1226 pci_push(base);
1208} 1227}
1209 1228
1210static void nv_stop_tx(struct net_device *dev) 1229static void nv_stop_tx(struct net_device *dev)
1211{ 1230{
1231 struct fe_priv *np = netdev_priv(dev);
1212 u8 __iomem *base = get_hwbase(dev); 1232 u8 __iomem *base = get_hwbase(dev);
1233 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1213 1234
1214 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name); 1235 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1215 writel(0, base + NvRegTransmitterControl); 1236 if (!np->mac_in_use)
1237 tx_ctrl &= ~NVREG_XMITCTL_START;
1238 else
1239 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1240 writel(tx_ctrl, base + NvRegTransmitterControl);
1216 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1241 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1217 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1242 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
1218 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1243 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1219 1244
1220 udelay(NV_TXSTOP_DELAY2); 1245 udelay(NV_TXSTOP_DELAY2);
1221 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 1246 if (!np->mac_in_use)
1247 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
1248 base + NvRegTransmitPoll);
1222} 1249}
1223 1250
1224static void nv_txrx_reset(struct net_device *dev) 1251static void nv_txrx_reset(struct net_device *dev)
@@ -4148,20 +4175,6 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
4148 return 0; 4175 return 0;
4149} 4176}
4150 4177
4151/* Indicate to mgmt unit whether driver is loaded or not */
4152static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
4153{
4154 u8 __iomem *base = get_hwbase(dev);
4155 u32 tx_ctrl;
4156
4157 tx_ctrl = readl(base + NvRegTransmitterControl);
4158 if (loaded)
4159 tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
4160 else
4161 tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
4162 writel(tx_ctrl, base + NvRegTransmitterControl);
4163}
4164
4165static int nv_open(struct net_device *dev) 4178static int nv_open(struct net_device *dev)
4166{ 4179{
4167 struct fe_priv *np = netdev_priv(dev); 4180 struct fe_priv *np = netdev_priv(dev);
@@ -4659,33 +4672,24 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4659 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus); 4672 writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
4660 4673
4661 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 4674 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
4662 writel(0x1, base + 0x204); pci_push(base);
4663 msleep(500);
4664 /* management unit running on the mac? */ 4675 /* management unit running on the mac? */
4665 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST; 4676 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
4666 if (np->mac_in_use) { 4677 np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
4667 u32 mgmt_sync; 4678 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
4668 /* management unit setup the phy already? */ 4679 for (i = 0; i < 5000; i++) {
4669 mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; 4680 msleep(1);
4670 if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) { 4681 if (nv_mgmt_acquire_sema(dev)) {
4671 if (!nv_mgmt_acquire_sema(dev)) { 4682 /* management unit setup the phy already? */
4672 for (i = 0; i < 5000; i++) { 4683 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
4673 msleep(1); 4684 NVREG_XMITCTL_SYNC_PHY_INIT) {
4674 mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK; 4685 /* phy is inited by mgmt unit */
4675 if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) 4686 phyinitialized = 1;
4676 continue; 4687 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
4677 if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) 4688 } else {
4678 phyinitialized = 1; 4689 /* we need to init the phy */
4679 break;
4680 } 4690 }
4681 } else { 4691 break;
4682 /* we need to init the phy */
4683 } 4692 }
4684 } else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
4685 /* phy is inited by SMU */
4686 phyinitialized = 1;
4687 } else {
4688 /* we need to init the phy */
4689 } 4693 }
4690 } 4694 }
4691 } 4695 }
@@ -4724,10 +4728,12 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4724 if (!phyinitialized) { 4728 if (!phyinitialized) {
4725 /* reset it */ 4729 /* reset it */
4726 phy_init(dev); 4730 phy_init(dev);
4727 } 4731 } else {
4728 4732 /* see if it is a gigabit phy */
4729 if (id->driver_data & DEV_HAS_MGMT_UNIT) { 4733 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4730 nv_mgmt_driver_loaded(dev, 1); 4734 if (mii_status & PHY_GIGABIT) {
4735 np->gigabit = PHY_GIGABIT;
4736 }
4731 } 4737 }
4732 4738
4733 /* set default link speed settings */ 4739 /* set default link speed settings */
@@ -4749,8 +4755,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4749out_error: 4755out_error:
4750 if (phystate_orig) 4756 if (phystate_orig)
4751 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl); 4757 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
4752 if (np->mac_in_use)
4753 nv_mgmt_driver_loaded(dev, 0);
4754 pci_set_drvdata(pci_dev, NULL); 4758 pci_set_drvdata(pci_dev, NULL);
4755out_freering: 4759out_freering:
4756 free_rings(dev); 4760 free_rings(dev);
@@ -4780,9 +4784,6 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
4780 writel(np->orig_mac[0], base + NvRegMacAddrA); 4784 writel(np->orig_mac[0], base + NvRegMacAddrA);
4781 writel(np->orig_mac[1], base + NvRegMacAddrB); 4785 writel(np->orig_mac[1], base + NvRegMacAddrB);
4782 4786
4783 if (np->mac_in_use)
4784 nv_mgmt_driver_loaded(dev, 0);
4785
4786 /* free all structures */ 4787 /* free all structures */
4787 free_rings(dev); 4788 free_rings(dev);
4788 iounmap(get_hwbase(dev)); 4789 iounmap(get_hwbase(dev));
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c26a4b8e552a..ca2b21f9d444 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -154,8 +154,8 @@ static int ifb_xmit(struct sk_buff *skb, struct net_device *dev)
154 int ret = 0; 154 int ret = 0;
155 u32 from = G_TC_FROM(skb->tc_verd); 155 u32 from = G_TC_FROM(skb->tc_verd);
156 156
157 stats->tx_packets++; 157 stats->rx_packets++;
158 stats->tx_bytes+=skb->len; 158 stats->rx_bytes+=skb->len;
159 159
160 if (!from || !skb->input_dev) { 160 if (!from || !skb->input_dev) {
161dropped: 161dropped:
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 50ffe90488ff..f4aba4355b19 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -171,6 +171,7 @@ struct ixgb_adapter {
171 171
172 /* TX */ 172 /* TX */
173 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; 173 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
174 unsigned int restart_queue;
174 unsigned long timeo_start; 175 unsigned long timeo_start;
175 uint32_t tx_cmd_type; 176 uint32_t tx_cmd_type;
176 uint64_t hw_csum_tx_good; 177 uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cd22523fb035..82c044d6e08a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
80 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 80 {"tx_deferred_ok", IXGB_STAT(stats.dc)},
81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
82 {"tx_restart_queue", IXGB_STAT(restart_queue) },
82 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)},
83 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
84#ifdef NETIF_F_TSO 85#ifdef NETIF_F_TSO
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 02089b64e42c..ecbf45861c68 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
399 /* Zero out the other 15 receive addresses. */ 399 /* Zero out the other 15 receive addresses. */
400 DEBUGOUT("Clearing RAR[1-15]\n"); 400 DEBUGOUT("Clearing RAR[1-15]\n");
401 for(i = 1; i < IXGB_RAR_ENTRIES; i++) { 401 for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
402 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 402 /* Write high reg first to disable the AV bit first */
403 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 403 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
404 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
404 } 405 }
405 406
406 return; 407 return;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49..a083a9189230 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "1.0.117-k2"DRIVERNAPI 39#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
40char ixgb_driver_version[] = DRV_VERSION; 40char ixgb_driver_version[] = DRV_VERSION;
41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1287 struct ixgb_buffer *buffer_info; 1287 struct ixgb_buffer *buffer_info;
1288 int len = skb->len; 1288 int len = skb->len;
1289 unsigned int offset = 0, size, count = 0, i; 1289 unsigned int offset = 0, size, count = 0, i;
1290 unsigned int mss = skb_shinfo(skb)->gso_size;
1290 1291
1291 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1292 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1292 unsigned int f; 1293 unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1298 while(len) { 1299 while(len) {
1299 buffer_info = &tx_ring->buffer_info[i]; 1300 buffer_info = &tx_ring->buffer_info[i];
1300 size = min(len, IXGB_MAX_DATA_PER_TXD); 1301 size = min(len, IXGB_MAX_DATA_PER_TXD);
1302 /* Workaround for premature desc write-backs
1303 * in TSO mode. Append 4-byte sentinel desc */
1304 if (unlikely(mss && !nr_frags && size == len && size > 8))
1305 size -= 4;
1306
1301 buffer_info->length = size; 1307 buffer_info->length = size;
1302 WARN_ON(buffer_info->dma != 0); 1308 WARN_ON(buffer_info->dma != 0);
1303 buffer_info->dma = 1309 buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1324 while(len) { 1330 while(len) {
1325 buffer_info = &tx_ring->buffer_info[i]; 1331 buffer_info = &tx_ring->buffer_info[i];
1326 size = min(len, IXGB_MAX_DATA_PER_TXD); 1332 size = min(len, IXGB_MAX_DATA_PER_TXD);
1333
1334 /* Workaround for premature desc write-backs
1335 * in TSO mode. Append 4-byte sentinel desc */
1336 if (unlikely(mss && !nr_frags && size == len
1337 && size > 8))
1338 size -= 4;
1339
1327 buffer_info->length = size; 1340 buffer_info->length = size;
1328 buffer_info->dma = 1341 buffer_info->dma =
1329 pci_map_page(adapter->pdev, 1342 pci_map_page(adapter->pdev,
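
Both copies of the workaround above shave 4 bytes off a TSO frame's final chunk so a trailing sentinel descriptor can be appended, avoiding premature descriptor write-backs. The chunking logic, condensed into one sketch (descriptor limit hypothetical):

#include <stddef.h>

#define MAX_PER_TXD 8192	/* hypothetical per-descriptor byte limit */

static size_t next_chunk_size(size_t len, int tso, int nr_frags)
{
	size_t size = len < MAX_PER_TXD ? len : MAX_PER_TXD;

	/* Last chunk of an unfragmented TSO frame: hold 4 bytes back
	 * for the sentinel descriptor. */
	if (tso && !nr_frags && size == len && size > 8)
		size -= 4;
	return size;
}
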
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1398 IXGB_WRITE_REG(&adapter->hw, TDT, i); 1411 IXGB_WRITE_REG(&adapter->hw, TDT, i);
1399} 1412}
1400 1413
1414static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1415{
1416 struct ixgb_adapter *adapter = netdev_priv(netdev);
1417 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1418
1419 netif_stop_queue(netdev);
1420 /* Herbert's original patch had:
1421 * smp_mb__after_netif_stop_queue();
1422 * but since that doesn't exist yet, just open code it. */
1423 smp_mb();
1424
1425 /* We need to check again in case another CPU has just
1426 * made room available. */
1427 if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1428 return -EBUSY;
1429
1430 /* A reprieve! */
1431 netif_start_queue(netdev);
1432 ++adapter->restart_queue;
1433 return 0;
1434}
1435
1436static int ixgb_maybe_stop_tx(struct net_device *netdev,
1437 struct ixgb_desc_ring *tx_ring, int size)
1438{
1439 if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1440 return 0;
1441 return __ixgb_maybe_stop_tx(netdev, size);
1442}
1443
1444
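
The pair of functions above implements the standard stop/recheck idiom for avoiding a lost-wakeup race with the cleanup path: stop the queue, issue a full memory barrier, then re-read the free-descriptor count, restarting the queue if another CPU made room in the meantime. A userspace sketch with the queue and ring modeled as plain variables:

#include <stdbool.h>

static bool queue_stopped;
static int  desc_unused;	/* stands in for IXGB_DESC_UNUSED(tx_ring) */

static int maybe_stop_tx(int needed)
{
	if (desc_unused >= needed)
		return 0;			/* fast path: enough room */

	queue_stopped = true;			/* netif_stop_queue() */
	__sync_synchronize();			/* smp_mb(): order stop vs. recheck */

	if (desc_unused < needed)
		return -1;			/* -EBUSY: stay stopped */

	queue_stopped = false;			/* a reprieve: restart the queue */
	return 0;
}
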
1401/* Tx Descriptors needed, worst case */ 1445/* Tx Descriptors needed, worst case */
1402#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ 1446#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1403 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 1447 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1404#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \ 1448#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
1405 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 1449 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1450 + 1 /* one more needed for sentinel TSO workaround */
1406 1451
1407static int 1452static int
1408ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1453ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1430 spin_lock_irqsave(&adapter->tx_lock, flags); 1475 spin_lock_irqsave(&adapter->tx_lock, flags);
1431#endif 1476#endif
1432 1477
1433 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) { 1478 if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1479 DESC_NEEDED))) {
1434 netif_stop_queue(netdev); 1480 netif_stop_queue(netdev);
1435 spin_unlock_irqrestore(&adapter->tx_lock, flags); 1481 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1436 return NETDEV_TX_BUSY; 1482 return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1468 1514
1469#ifdef NETIF_F_LLTX 1515#ifdef NETIF_F_LLTX
1470 /* Make sure there is space in the ring for the next send. */ 1516 /* Make sure there is space in the ring for the next send. */
1471 if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) 1517 ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1472 netif_stop_queue(netdev);
1473 1518
1474 spin_unlock_irqrestore(&adapter->tx_lock, flags); 1519 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1475 1520
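The stop/re-check scheme above only closes the queue-full race if the reclaim side mirrors it: free descriptors first, then a barrier, then test the stopped flag. A minimal sketch of that other half, assuming a cleanup path shaped like ixgb_clean_tx_irq() (names reused from this driver, reclaim body elided):

	/* Wake side pairing with the smp_mb() in __ixgb_maybe_stop_tx():
	 * the barrier makes the freed descriptors visible before the
	 * stopped-queue test, so neither CPU can miss the other's update.
	 * Assumed shape, not the in-tree function.
	 */
	static void example_clean_tx(struct ixgb_adapter *adapter)
	{
		struct net_device *netdev = adapter->netdev;

		/* ... reclaim completed descriptors here ... */

		smp_mb();
		if (unlikely(netif_queue_stopped(netdev) &&
			     IXGB_DESC_UNUSED(&adapter->tx_ring) >= DESC_NEEDED))
			netif_wake_queue(netdev);
	}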
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 82c10dec1b5a..2b739fd584f1 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -229,9 +229,11 @@ struct net_device loopback_dev = {
229}; 229};
230 230
231/* Setup and register the loopback device. */ 231/* Setup and register the loopback device. */
232int __init loopback_init(void) 232static int __init loopback_init(void)
233{ 233{
234 return register_netdev(&loopback_dev); 234 return register_netdev(&loopback_dev);
235}; 235};
236 236
237module_init(loopback_init);
238
237EXPORT_SYMBOL(loopback_dev); 239EXPORT_SYMBOL(loopback_dev);
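loopback_init() can become static only because registration now goes through module_init(): for built-in code that macro records the function in an initcall linker section, so net/Space.c no longer needs the symbol. The same shape in isolation (hypothetical device, setup elided):

	static struct net_device example_dev;	/* hypothetical; setup elided */

	static int __init example_init(void)
	{
		return register_netdev(&example_dev);
	}
	module_init(example_init);	/* plain initcall when built in */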
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 794cc61819dd..448bf4a78016 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -281,7 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
281 link->conf.Attributes = CONF_ENABLE_IRQ; 281 link->conf.Attributes = CONF_ENABLE_IRQ;
282 link->conf.IntType = INT_MEMORY_AND_IO; 282 link->conf.IntType = INT_MEMORY_AND_IO;
283 link->conf.ConfigIndex = 1; 283 link->conf.ConfigIndex = 1;
284 link->conf.Present = PRESENT_OPTION;
285 284
286 /* The EL3-specific entries in the device structure. */ 285 /* The EL3-specific entries in the device structure. */
287 dev->hard_start_xmit = &el3_start_xmit; 286 dev->hard_start_xmit = &el3_start_xmit;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 1e73ff7d5d8e..342f4062de0b 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -195,7 +195,6 @@ static int tc589_probe(struct pcmcia_device *link)
195 link->conf.Attributes = CONF_ENABLE_IRQ; 195 link->conf.Attributes = CONF_ENABLE_IRQ;
196 link->conf.IntType = INT_MEMORY_AND_IO; 196 link->conf.IntType = INT_MEMORY_AND_IO;
197 link->conf.ConfigIndex = 1; 197 link->conf.ConfigIndex = 1;
198 link->conf.Present = PRESENT_OPTION;
199 198
200 /* The EL3-specific entries in the device structure. */ 199 /* The EL3-specific entries in the device structure. */
201 SET_MODULE_OWNER(dev); 200 SET_MODULE_OWNER(dev);
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 91f65e91cd5f..0d1c7a41c9c6 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -173,7 +173,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
173 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 173 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
174 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 174 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
175 p_dev->conf.IntType = INT_MEMORY_AND_IO; 175 p_dev->conf.IntType = INT_MEMORY_AND_IO;
176 p_dev->conf.Present = PRESENT_OPTION;
177 176
178 p_dev->irq.Instance = info->dev = dev; 177 p_dev->irq.Instance = info->dev = dev;
179 p_dev->priv = info; 178 p_dev->priv = info;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2b1238e2dbdb..d88e9b2e93cf 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9),
1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2),
1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2),
1620 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88),
1620 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), 1621 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04),
1621 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), 1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d),
1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), 1623 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814),
@@ -1667,6 +1668,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1667 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), 1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737),
1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), 1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee),
1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), 1670 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922),
1671 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c),
1670 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), 1672 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0),
1671 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), 1673 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578),
1672 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307), 1674 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 8478dca3d8d1..5879e7c36988 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -576,7 +576,6 @@ xirc2ps_probe(struct pcmcia_device *link)
576 link->conf.Attributes = CONF_ENABLE_IRQ; 576 link->conf.Attributes = CONF_ENABLE_IRQ;
577 link->conf.IntType = INT_MEMORY_AND_IO; 577 link->conf.IntType = INT_MEMORY_AND_IO;
578 link->conf.ConfigIndex = 1; 578 link->conf.ConfigIndex = 1;
579 link->conf.Present = PRESENT_OPTION;
580 link->irq.Handler = xirc2ps_interrupt; 579 link->irq.Handler = xirc2ps_interrupt;
581 link->irq.Instance = dev; 580 link->irq.Instance = dev;
582 581
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index d79d141a601d..8844c20eac2d 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -208,6 +208,15 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
208 return; 208 return;
209} 209}
210 210
211static void ql_write_nvram_reg(struct ql3_adapter *qdev,
212 u32 __iomem *reg, u32 value)
213{
214 writel(value, reg);
215 readl(reg);
216 udelay(1);
217 return;
218}
219
211static void ql_write_page0_reg(struct ql3_adapter *qdev, 220static void ql_write_page0_reg(struct ql3_adapter *qdev,
212 u32 __iomem *reg, u32 value) 221 u32 __iomem *reg, u32 value)
213{ 222{
@@ -336,9 +345,9 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
336 qdev->mem_map_registers; 345 qdev->mem_map_registers;
337 346
338 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; 347 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
339 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 348 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
340 ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 349 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
341 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 350 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
342 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); 351 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
343} 352}
344 353
@@ -355,14 +364,14 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
355 qdev->mem_map_registers; 364 qdev->mem_map_registers;
356 365
357 /* Clock in a zero, then do the start bit */ 366 /* Clock in a zero, then do the start bit */
358 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 367 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
359 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 368 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
360 AUBURN_EEPROM_DO_1); 369 AUBURN_EEPROM_DO_1);
361 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 370 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
362 ISP_NVRAM_MASK | qdev-> 371 ISP_NVRAM_MASK | qdev->
363 eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 372 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
364 AUBURN_EEPROM_CLK_RISE); 373 AUBURN_EEPROM_CLK_RISE);
365 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 374 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
366 ISP_NVRAM_MASK | qdev-> 375 ISP_NVRAM_MASK | qdev->
367 eeprom_cmd_data | AUBURN_EEPROM_DO_1 | 376 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
368 AUBURN_EEPROM_CLK_FALL); 377 AUBURN_EEPROM_CLK_FALL);
@@ -378,20 +387,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
378 * If the bit changed, then change the DO state to 387 * If the bit changed, then change the DO state to
379 * match 388 * match
380 */ 389 */
381 ql_write_common_reg(qdev, 390 ql_write_nvram_reg(qdev,
382 &port_regs->CommonRegs. 391 &port_regs->CommonRegs.
383 serialPortInterfaceReg, 392 serialPortInterfaceReg,
384 ISP_NVRAM_MASK | qdev-> 393 ISP_NVRAM_MASK | qdev->
385 eeprom_cmd_data | dataBit); 394 eeprom_cmd_data | dataBit);
386 previousBit = dataBit; 395 previousBit = dataBit;
387 } 396 }
388 ql_write_common_reg(qdev, 397 ql_write_nvram_reg(qdev,
389 &port_regs->CommonRegs. 398 &port_regs->CommonRegs.
390 serialPortInterfaceReg, 399 serialPortInterfaceReg,
391 ISP_NVRAM_MASK | qdev-> 400 ISP_NVRAM_MASK | qdev->
392 eeprom_cmd_data | dataBit | 401 eeprom_cmd_data | dataBit |
393 AUBURN_EEPROM_CLK_RISE); 402 AUBURN_EEPROM_CLK_RISE);
394 ql_write_common_reg(qdev, 403 ql_write_nvram_reg(qdev,
395 &port_regs->CommonRegs. 404 &port_regs->CommonRegs.
396 serialPortInterfaceReg, 405 serialPortInterfaceReg,
397 ISP_NVRAM_MASK | qdev-> 406 ISP_NVRAM_MASK | qdev->
@@ -412,20 +421,20 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
412 * If the bit changed, then change the DO state to 421 * If the bit changed, then change the DO state to
413 * match 422 * match
414 */ 423 */
415 ql_write_common_reg(qdev, 424 ql_write_nvram_reg(qdev,
416 &port_regs->CommonRegs. 425 &port_regs->CommonRegs.
417 serialPortInterfaceReg, 426 serialPortInterfaceReg,
418 ISP_NVRAM_MASK | qdev-> 427 ISP_NVRAM_MASK | qdev->
419 eeprom_cmd_data | dataBit); 428 eeprom_cmd_data | dataBit);
420 previousBit = dataBit; 429 previousBit = dataBit;
421 } 430 }
422 ql_write_common_reg(qdev, 431 ql_write_nvram_reg(qdev,
423 &port_regs->CommonRegs. 432 &port_regs->CommonRegs.
424 serialPortInterfaceReg, 433 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev-> 434 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit | 435 eeprom_cmd_data | dataBit |
427 AUBURN_EEPROM_CLK_RISE); 436 AUBURN_EEPROM_CLK_RISE);
428 ql_write_common_reg(qdev, 437 ql_write_nvram_reg(qdev,
429 &port_regs->CommonRegs. 438 &port_regs->CommonRegs.
430 serialPortInterfaceReg, 439 serialPortInterfaceReg,
431 ISP_NVRAM_MASK | qdev-> 440 ISP_NVRAM_MASK | qdev->
@@ -443,7 +452,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev)
443 struct ql3xxx_port_registers __iomem *port_regs = 452 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers; 453 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; 454 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg, 455 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data); 456 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448} 457}
449 458
@@ -461,12 +470,12 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
461 /* Read the data bits */ 470 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */ 471 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) { 472 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev, 473 ql_write_nvram_reg(qdev,
465 &port_regs->CommonRegs. 474 &port_regs->CommonRegs.
466 serialPortInterfaceReg, 475 serialPortInterfaceReg,
467 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 476 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE); 477 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev, 478 ql_write_nvram_reg(qdev,
470 &port_regs->CommonRegs. 479 &port_regs->CommonRegs.
471 serialPortInterfaceReg, 480 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 481 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
@@ -3370,7 +3379,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3370 SET_MODULE_OWNER(ndev); 3379 SET_MODULE_OWNER(ndev);
3371 SET_NETDEV_DEV(ndev, &pdev->dev); 3380 SET_NETDEV_DEV(ndev, &pdev->dev);
3372 3381
3373 ndev->features = NETIF_F_LLTX;
3374 if (pci_using_dac) 3382 if (pci_using_dac)
3375 ndev->features |= NETIF_F_HIGHDMA; 3383 ndev->features |= NETIF_F_HIGHDMA;
3376 3384
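ql_write_nvram_reg() differs from ql_write_common_reg() only by the read-back and delay, which is the standard way to pace bit-banged EEPROM clocking over posted MMIO: the read forces the posted write out to the device, and the delay holds the clock edge long enough to be sampled. The idiom in isolation (a sketch; the 1us figure is simply what this driver chose):

	static void example_paced_write(u32 __iomem *reg, u32 value)
	{
		writel(value, reg);
		(void)readl(reg);	/* flush the posted write */
		udelay(1);		/* assumed minimum clock period */
	}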
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 785e4a535f9e..616be8d0fa85 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -90,7 +90,8 @@
90 90
91#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 91#define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
92 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 92 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
93 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) 93 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \
94 SUPPORTED_Pause | SUPPORTED_Autoneg)
94 95
95#define DRV_NAME "sungem" 96#define DRV_NAME "sungem"
96#define DRV_VERSION "0.98" 97#define DRV_VERSION "0.98"
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index 49800b25907d..d21991ee88c4 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -3,10 +3,9 @@
3 * 3 *
4 * This file could be shared with other drivers. 4 * This file could be shared with other drivers.
5 * 5 *
6 * (c) 2002, Benjamin Herrenschmidt (benh@kernel.crashing.org) 6 * (c) 2002-2007, Benjamin Herrenschmidt (benh@kernel.crashing.org)
7 * 7 *
8 * TODO: 8 * TODO:
9 * - Implement WOL
10 * - Add support for PHYs that provide an IRQ line 9 * - Add support for PHYs that provide an IRQ line
11 * - Eventually move the entire polling state machine in 10 * - Eventually move the entire polling state machine in
12 * there (out of the eth driver), so that it can easily be 11 * there (out of the eth driver), so that it can easily be
@@ -152,6 +151,44 @@ static int bcm5221_suspend(struct mii_phy* phy)
152 return 0; 151 return 0;
153} 152}
154 153
154static int bcm5241_init(struct mii_phy* phy)
155{
156 u16 data;
157
158 data = phy_read(phy, MII_BCM5221_TEST);
159 phy_write(phy, MII_BCM5221_TEST,
160 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
161
162 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2);
163 phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2,
164 data | MII_BCM5221_SHDOW_AUX_STAT2_APD);
165
166 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
167 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
168 data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
169
170 data = phy_read(phy, MII_BCM5221_TEST);
171 phy_write(phy, MII_BCM5221_TEST,
172 data & ~MII_BCM5221_TEST_ENABLE_SHADOWS);
173
174 return 0;
175}
176
177static int bcm5241_suspend(struct mii_phy* phy)
178{
179 u16 data;
180
181 data = phy_read(phy, MII_BCM5221_TEST);
182 phy_write(phy, MII_BCM5221_TEST,
183 data | MII_BCM5221_TEST_ENABLE_SHADOWS);
184
185 data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4);
186 phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4,
187 data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR);
188
189 return 0;
190}
191
155static int bcm5400_init(struct mii_phy* phy) 192static int bcm5400_init(struct mii_phy* phy)
156{ 193{
157 u16 data; 194 u16 data;
@@ -373,6 +410,10 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise)
373 adv |= ADVERTISE_100HALF; 410 adv |= ADVERTISE_100HALF;
374 if (advertise & ADVERTISED_100baseT_Full) 411 if (advertise & ADVERTISED_100baseT_Full)
375 adv |= ADVERTISE_100FULL; 412 adv |= ADVERTISE_100FULL;
413 if (advertise & ADVERTISED_Pause)
414 adv |= ADVERTISE_PAUSE_CAP;
415 if (advertise & ADVERTISED_Asym_Pause)
416 adv |= ADVERTISE_PAUSE_ASYM;
376 phy_write(phy, MII_ADVERTISE, adv); 417 phy_write(phy, MII_ADVERTISE, adv);
377 418
378 /* Setup 1000BT advertise */ 419 /* Setup 1000BT advertise */
@@ -436,12 +477,15 @@ static int bcm54xx_read_link(struct mii_phy *phy)
436 val = phy_read(phy, MII_BCM5400_AUXSTATUS); 477 val = phy_read(phy, MII_BCM5400_AUXSTATUS);
437 link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> 478 link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >>
438 MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); 479 MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT);
439 phy->duplex = phy_BCM5400_link_table[link_mode][0] ? DUPLEX_FULL : DUPLEX_HALF; 480 phy->duplex = phy_BCM5400_link_table[link_mode][0] ?
481 DUPLEX_FULL : DUPLEX_HALF;
440 phy->speed = phy_BCM5400_link_table[link_mode][2] ? 482 phy->speed = phy_BCM5400_link_table[link_mode][2] ?
441 SPEED_1000 : 483 SPEED_1000 :
442 (phy_BCM5400_link_table[link_mode][1] ? SPEED_100 : SPEED_10); 484 (phy_BCM5400_link_table[link_mode][1] ?
485 SPEED_100 : SPEED_10);
443 val = phy_read(phy, MII_LPA); 486 val = phy_read(phy, MII_LPA);
444 phy->pause = ((val & LPA_PAUSE) != 0); 487 phy->pause = (phy->duplex == DUPLEX_FULL) &&
488 ((val & LPA_PAUSE) != 0);
445 } 489 }
446 /* On non-aneg, we assume what we put in BMCR is the speed, 490 /* On non-aneg, we assume what we put in BMCR is the speed,
447 * though magic-aneg shouldn't prevent this case from occurring 491 * though magic-aneg shouldn't prevent this case from occurring
@@ -450,6 +494,28 @@ static int bcm54xx_read_link(struct mii_phy *phy)
450 return 0; 494 return 0;
451} 495}
452 496
497static int marvell88e1111_init(struct mii_phy* phy)
498{
499 u16 rev;
500
501 /* magic init sequence for rev 0 */
502 rev = phy_read(phy, MII_PHYSID2) & 0x000f;
503 if (rev == 0) {
504 phy_write(phy, 0x1d, 0x000a);
505 phy_write(phy, 0x1e, 0x0821);
506
507 phy_write(phy, 0x1d, 0x0006);
508 phy_write(phy, 0x1e, 0x8600);
509
510 phy_write(phy, 0x1d, 0x000b);
511 phy_write(phy, 0x1e, 0x0100);
512
513 phy_write(phy, 0x1d, 0x0004);
514 phy_write(phy, 0x1e, 0x4850);
515 }
516 return 0;
517}
518
453static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) 519static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
454{ 520{
455 u16 ctl, adv; 521 u16 ctl, adv;
@@ -471,6 +537,10 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise)
471 adv |= ADVERTISE_100HALF; 537 adv |= ADVERTISE_100HALF;
472 if (advertise & ADVERTISED_100baseT_Full) 538 if (advertise & ADVERTISED_100baseT_Full)
473 adv |= ADVERTISE_100FULL; 539 adv |= ADVERTISE_100FULL;
540 if (advertise & ADVERTISED_Pause)
541 adv |= ADVERTISE_PAUSE_CAP;
542 if (advertise & ADVERTISED_Asym_Pause)
543 adv |= ADVERTISE_PAUSE_ASYM;
474 phy_write(phy, MII_ADVERTISE, adv); 544 phy_write(phy, MII_ADVERTISE, adv);
475 545
476 /* Setup 1000BT advertise & enable crossover detect 546 /* Setup 1000BT advertise & enable crossover detect
@@ -549,7 +619,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd)
549 619
550static int marvell_read_link(struct mii_phy *phy) 620static int marvell_read_link(struct mii_phy *phy)
551{ 621{
552 u16 status; 622 u16 status, pmask;
553 623
554 if (phy->autoneg) { 624 if (phy->autoneg) {
555 status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); 625 status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS);
@@ -565,7 +635,9 @@ static int marvell_read_link(struct mii_phy *phy)
565 phy->duplex = DUPLEX_FULL; 635 phy->duplex = DUPLEX_FULL;
566 else 636 else
567 phy->duplex = DUPLEX_HALF; 637 phy->duplex = DUPLEX_HALF;
568 phy->pause = 0; /* XXX Check against spec ! */ 638 pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
639 MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;
640 phy->pause = (status & pmask) == pmask;
569 } 641 }
570 /* On non-aneg, we assume what we put in BMCR is the speed, 642 /* On non-aneg, we assume what we put in BMCR is the speed,
571 * though magic-aneg shouldn't prevent this case from occurring 643 * though magic-aneg shouldn't prevent this case from occurring
@@ -595,6 +667,10 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
595 adv |= ADVERTISE_100HALF; 667 adv |= ADVERTISE_100HALF;
596 if (advertise & ADVERTISED_100baseT_Full) 668 if (advertise & ADVERTISED_100baseT_Full)
597 adv |= ADVERTISE_100FULL; 669 adv |= ADVERTISE_100FULL;
670 if (advertise & ADVERTISED_Pause)
671 adv |= ADVERTISE_PAUSE_CAP;
672 if (advertise & ADVERTISED_Asym_Pause)
673 adv |= ADVERTISE_PAUSE_ASYM;
598 phy_write(phy, MII_ADVERTISE, adv); 674 phy_write(phy, MII_ADVERTISE, adv);
599 675
600 /* Start/Restart aneg */ 676 /* Start/Restart aneg */
@@ -666,7 +742,8 @@ static int genmii_read_link(struct mii_phy *phy)
666 phy->speed = SPEED_100; 742 phy->speed = SPEED_100;
667 else 743 else
668 phy->speed = SPEED_10; 744 phy->speed = SPEED_10;
669 phy->pause = 0; 745 phy->pause = (phy->duplex == DUPLEX_FULL) &&
746 ((lpa & LPA_PAUSE) != 0);
670 } 747 }
671 /* On non-aneg, we assume what we put in BMCR is the speed, 748 /* On non-aneg, we assume what we put in BMCR is the speed,
672 * though magic-aneg shouldn't prevent this case from occurring 749 * though magic-aneg shouldn't prevent this case from occurring
@@ -676,11 +753,19 @@ static int genmii_read_link(struct mii_phy *phy)
676} 753}
677 754
678 755
679#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ 756#define MII_BASIC_FEATURES \
680 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 757 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
681 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII) 758 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
682#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ 759 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | \
683 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) 760 SUPPORTED_Pause)
761
762/* On gigabit capable PHYs, we advertise Pause support but not asym pause
763 * support for now as I'm not sure it's supported and Darwin doesn't do
764 * it either. --BenH.
765 */
766#define MII_GBIT_FEATURES \
767 (MII_BASIC_FEATURES | \
768 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
684 769
685/* Broadcom BCM 5201 */ 770/* Broadcom BCM 5201 */
686static struct mii_phy_ops bcm5201_phy_ops = { 771static struct mii_phy_ops bcm5201_phy_ops = {
@@ -720,6 +805,24 @@ static struct mii_phy_def bcm5221_phy_def = {
720 .ops = &bcm5221_phy_ops 805 .ops = &bcm5221_phy_ops
721}; 806};
722 807
808/* Broadcom BCM 5241 */
809static struct mii_phy_ops bcm5241_phy_ops = {
810 .suspend = bcm5241_suspend,
811 .init = bcm5241_init,
812 .setup_aneg = genmii_setup_aneg,
813 .setup_forced = genmii_setup_forced,
814 .poll_link = genmii_poll_link,
815 .read_link = genmii_read_link,
816};
817static struct mii_phy_def bcm5241_phy_def = {
818 .phy_id = 0x0143bc30,
819 .phy_id_mask = 0xfffffff0,
820 .name = "BCM5241",
821 .features = MII_BASIC_FEATURES,
822 .magic_aneg = 1,
823 .ops = &bcm5241_phy_ops
824};
825
723/* Broadcom BCM 5400 */ 826/* Broadcom BCM 5400 */
724static struct mii_phy_ops bcm5400_phy_ops = { 827static struct mii_phy_ops bcm5400_phy_ops = {
725 .init = bcm5400_init, 828 .init = bcm5400_init,
@@ -854,11 +957,8 @@ static struct mii_phy_def bcm5462V_phy_def = {
854 .ops = &bcm5462V_phy_ops 957 .ops = &bcm5462V_phy_ops
855}; 958};
856 959
857/* Marvell 88E1101 (Apple seem to deal with 2 different revs, 960/* Marvell 88E1101 and 88E1111 */
858 * I masked out the 8 last bits to get both, but some specs 961static struct mii_phy_ops marvell88e1101_phy_ops = {
859 * would be useful here) --BenH.
860 */
861static struct mii_phy_ops marvell_phy_ops = {
862 .suspend = generic_suspend, 962 .suspend = generic_suspend,
863 .setup_aneg = marvell_setup_aneg, 963 .setup_aneg = marvell_setup_aneg,
864 .setup_forced = marvell_setup_forced, 964 .setup_forced = marvell_setup_forced,
@@ -866,13 +966,41 @@ static struct mii_phy_ops marvell_phy_ops = {
866 .read_link = marvell_read_link 966 .read_link = marvell_read_link
867}; 967};
868 968
869static struct mii_phy_def marvell_phy_def = { 969static struct mii_phy_ops marvell88e1111_phy_ops = {
870 .phy_id = 0x01410c00, 970 .init = marvell88e1111_init,
871 .phy_id_mask = 0xffffff00, 971 .suspend = generic_suspend,
872 .name = "Marvell 88E1101", 972 .setup_aneg = marvell_setup_aneg,
973 .setup_forced = marvell_setup_forced,
974 .poll_link = genmii_poll_link,
975 .read_link = marvell_read_link
976};
977
978/* two revs in darwin for the 88e1101 ... I could use a datasheet
979 * to get the proper names...
980 */
981static struct mii_phy_def marvell88e1101v1_phy_def = {
982 .phy_id = 0x01410c20,
983 .phy_id_mask = 0xfffffff0,
984 .name = "Marvell 88E1101v1",
985 .features = MII_GBIT_FEATURES,
986 .magic_aneg = 1,
987 .ops = &marvell88e1101_phy_ops
988};
989static struct mii_phy_def marvell88e1101v2_phy_def = {
990 .phy_id = 0x01410c60,
991 .phy_id_mask = 0xfffffff0,
992 .name = "Marvell 88E1101v2",
993 .features = MII_GBIT_FEATURES,
994 .magic_aneg = 1,
995 .ops = &marvell88e1101_phy_ops
996};
997static struct mii_phy_def marvell88e1111_phy_def = {
998 .phy_id = 0x01410cc0,
999 .phy_id_mask = 0xfffffff0,
1000 .name = "Marvell 88E1111",
873 .features = MII_GBIT_FEATURES, 1001 .features = MII_GBIT_FEATURES,
874 .magic_aneg = 1, 1002 .magic_aneg = 1,
875 .ops = &marvell_phy_ops 1003 .ops = &marvell88e1111_phy_ops
876}; 1004};
877 1005
878/* Generic implementation for most 10/100 PHYs */ 1006/* Generic implementation for most 10/100 PHYs */
@@ -895,6 +1023,7 @@ static struct mii_phy_def genmii_phy_def = {
895static struct mii_phy_def* mii_phy_table[] = { 1023static struct mii_phy_def* mii_phy_table[] = {
896 &bcm5201_phy_def, 1024 &bcm5201_phy_def,
897 &bcm5221_phy_def, 1025 &bcm5221_phy_def,
1026 &bcm5241_phy_def,
898 &bcm5400_phy_def, 1027 &bcm5400_phy_def,
899 &bcm5401_phy_def, 1028 &bcm5401_phy_def,
900 &bcm5411_phy_def, 1029 &bcm5411_phy_def,
@@ -902,7 +1031,9 @@ static struct mii_phy_def* mii_phy_table[] = {
902 &bcm5421k2_phy_def, 1031 &bcm5421k2_phy_def,
903 &bcm5461_phy_def, 1032 &bcm5461_phy_def,
904 &bcm5462V_phy_def, 1033 &bcm5462V_phy_def,
905 &marvell_phy_def, 1034 &marvell88e1101v1_phy_def,
1035 &marvell88e1101v2_phy_def,
1036 &marvell88e1111_phy_def,
906 &genmii_phy_def, 1037 &genmii_phy_def,
907 NULL 1038 NULL
908}; 1039};
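Several hunks above apply one rule: pause (flow control) is resolved only on full-duplex links, and only from what the link partner actually advertised. Condensed into one sketch, using the names this file already uses:

	/* Advertise ADVERTISE_PAUSE_CAP during aneg (as the setup_aneg
	 * paths above now do), then accept pause only when the link is
	 * full duplex and MII_LPA shows the partner advertised it too.
	 */
	static void example_resolve_pause(struct mii_phy *phy)
	{
		u16 lpa = phy_read(phy, MII_LPA);

		phy->pause = (phy->duplex == DUPLEX_FULL) &&
			     ((lpa & LPA_PAUSE) != 0);
	}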
diff --git a/drivers/net/sungem_phy.h b/drivers/net/sungem_phy.h
index 8ee1ca0471cf..1d70ba6f9f10 100644
--- a/drivers/net/sungem_phy.h
+++ b/drivers/net/sungem_phy.h
@@ -30,7 +30,7 @@ struct mii_phy_def
30struct mii_phy 30struct mii_phy
31{ 31{
32 struct mii_phy_def* def; 32 struct mii_phy_def* def;
33 int advertising; 33 u32 advertising;
34 int mii_id; 34 int mii_id;
35 35
36 /* 1: autoneg enabled, 0: disabled */ 36 /* 1: autoneg enabled, 0: disabled */
@@ -85,6 +85,9 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
85#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001 85#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001
86#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004 86#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004
87 87
88/* MII BCM5241 Additional registers */
89#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008
90
88/* MII BCM5400 1000-BASET Control register */ 91/* MII BCM5400 1000-BASET Control register */
89#define MII_BCM5400_GB_CONTROL 0x09 92#define MII_BCM5400_GB_CONTROL 0x09
90#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200 93#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200
@@ -115,5 +118,7 @@ extern int mii_phy_probe(struct mii_phy *phy, int mii_id);
115#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 118#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
116#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 119#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
117#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 120#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
121#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008
122#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004
118 123
119#endif /* __SUNGEM_PHY_H__ */ 124#endif /* __SUNGEM_PHY_H__ */
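The two new status bits are what marvell_read_link() now tests as a pair: the 88E1xxx PHYs resolve pause themselves and report the outcome, so the driver simply requires both directions to be indicated. The test with the masks above, as a sketch:

	static int example_marvell_pause(u16 status)
	{
		u16 pmask = MII_M1011_PHY_SPEC_STATUS_TX_PAUSE |
			    MII_M1011_PHY_SPEC_STATUS_RX_PAUSE;

		/* both TX and RX pause must be resolved on */
		return (status & pmask) == pmask;
	}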
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 4056ba1ff3c7..f4bf62c2a7a5 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.71" 71#define DRV_MODULE_VERSION "3.72"
72#define DRV_MODULE_RELDATE "December 15, 2006" 72#define DRV_MODULE_RELDATE "January 8, 2007"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -1015,7 +1015,12 @@ out:
1015 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) { 1015 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 1017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1018 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b); 1018 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1019 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1020 tg3_writephy(tp, MII_TG3_TEST1,
1021 MII_TG3_TEST1_TRIM_EN | 0x4);
1022 } else
1023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1019 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1020 } 1025 }
1021 /* Set Extended packet length bit (bit 14) on all chips that */ 1026 /* Set Extended packet length bit (bit 14) on all chips that */
@@ -10803,9 +10808,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
10803 10808
10804 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10809 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10805 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10806 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) 10811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10807 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 10812 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10808 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) 10813 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10814 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10815 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10809 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 10816 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10810 } 10817 }
10811 10818
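The jitter workaround goes through the PHY's DSP window: MII_TG3_DSP_ADDRESS selects an internal register, MII_TG3_DSP_RW_PORT carries the data, and the MII_TG3_AUX_CTRL writes (0x0c00/0x0400) bracket the access. The address/data pair factored as a hypothetical helper (the 0x110b trim value and the bracket constants come from the hunk above, not from documentation):

	static void example_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
	{
		/* the two writes must stay paired: address, then data */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
	}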
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index cf78a7e5997b..80f59ac7ec58 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1658,6 +1658,9 @@
1658#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ 1658#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
1659#define MII_TG3_EPHY_SHADOW_EN 0x80 1659#define MII_TG3_EPHY_SHADOW_EN 0x80
1660 1660
1661#define MII_TG3_TEST1 0x1e
1662#define MII_TG3_TEST1_TRIM_EN 0x0010
1663
1661/* There are two ways to manage the TX descriptors on the tigon3. 1664/* There are two ways to manage the TX descriptors on the tigon3.
1662 * Either the descriptors are in host DMA'able memory, or they 1665 * Either the descriptors are in host DMA'able memory, or they
1663 * exist only in the cards on-chip SRAM. All 16 send bds are under 1666 * exist only in the cards on-chip SRAM. All 16 send bds are under
@@ -2256,6 +2259,7 @@ struct tg3 {
2256#define TG3_FLG2_1SHOT_MSI 0x10000000 2259#define TG3_FLG2_1SHOT_MSI 0x10000000
2257#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2260#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2258#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2261#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
2262#define TG3_FLG2_PHY_ADJUST_TRIM 0x80000000
2259 2263
2260 u32 split_mode_max_reqs; 2264 u32 split_mode_max_reqs;
2261#define SPLIT_MODE_5704_MAX_REQ 3 2265#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 0e94fbbf7a94..b85857a84870 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2664,7 +2664,7 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
2664 break; 2664 break;
2665 } 2665 }
2666#endif 2666#endif
2667 if (stats.len < sizeof(u->rx_data.header)) 2667 if (stats.len < sizeof(struct ieee80211_hdr_3addr))
2668 break; 2668 break;
2669 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) { 2669 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
2670 case IEEE80211_FTYPE_MGMT: 2670 case IEEE80211_FTYPE_MGMT:
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 644b4741ef74..a009ab517710 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -406,7 +406,6 @@ static int netwave_probe(struct pcmcia_device *link)
406 link->conf.Attributes = CONF_ENABLE_IRQ; 406 link->conf.Attributes = CONF_ENABLE_IRQ;
407 link->conf.IntType = INT_MEMORY_AND_IO; 407 link->conf.IntType = INT_MEMORY_AND_IO;
408 link->conf.ConfigIndex = 1; 408 link->conf.ConfigIndex = 1;
409 link->conf.Present = PRESENT_OPTION;
410 409
411 /* Netwave private struct init. link/dev/node already taken care of, 410 /* Netwave private struct init. link/dev/node already taken care of,
412 * other stuff zero'd - Jean II */ 411 * other stuff zero'd - Jean II */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e10c9bc4ac..47b2ccb6a633 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -331,7 +331,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
331 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 331 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
332 p_dev->conf.IntType = INT_MEMORY_AND_IO; 332 p_dev->conf.IntType = INT_MEMORY_AND_IO;
333 p_dev->conf.ConfigIndex = 1; 333 p_dev->conf.ConfigIndex = 1;
334 p_dev->conf.Present = PRESENT_OPTION;
335 334
336 p_dev->priv = dev; 335 p_dev->priv = dev;
337 p_dev->irq.Instance = dev; 336 p_dev->irq.Instance = dev;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 583e0d655a98..c250f08c8dd5 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1928,7 +1928,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1928 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 1928 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
1929 p_dev->conf.IntType = INT_MEMORY_AND_IO; 1929 p_dev->conf.IntType = INT_MEMORY_AND_IO;
1930 p_dev->conf.ConfigIndex = 1; 1930 p_dev->conf.ConfigIndex = 1;
1931 p_dev->conf.Present = PRESENT_OPTION;
1932 1931
1933 dev = alloc_etherdev(sizeof(struct wl3501_card)); 1932 dev = alloc_etherdev(sizeof(struct wl3501_card));
1934 if (!dev) 1933 if (!dev)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f1dd81a1d592..3cfb0a3575e6 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -19,7 +19,7 @@ config PCI_MSI
19 19
20config PCI_MULTITHREAD_PROBE 20config PCI_MULTITHREAD_PROBE
21 bool "PCI Multi-threaded probe (EXPERIMENTAL)" 21 bool "PCI Multi-threaded probe (EXPERIMENTAL)"
22 depends on PCI && EXPERIMENTAL 22 depends on PCI && EXPERIMENTAL && BROKEN
23 help 23 help
24 Say Y here if you want the PCI core to spawn a new thread for 24 Say Y here if you want the PCI core to spawn a new thread for
25 every PCI device that is probed. This can cause a huge 25 every PCI device that is probed. This can cause a huge
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6bfb942428e4..206c834d263a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -254,7 +254,8 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
254 if ((cap & mask) == ht_cap) 254 if ((cap & mask) == ht_cap)
255 return pos; 255 return pos;
256 256
257 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, 257 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
258 pos + PCI_CAP_LIST_NEXT,
258 PCI_CAP_ID_HT, &ttl); 259 PCI_CAP_ID_HT, &ttl);
259 } 260 }
260 261
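The one-line pci.c fix is subtle: __pci_find_next_cap_ttl() treats its pos argument as the offset of a next-capability pointer byte and dereferences it first, so resuming the walk with the capability's own offset reads the capability ID as if it were a link. Passing pos + PCI_CAP_LIST_NEXT resumes from this capability's link field instead. A simplified, self-contained version of such a ttl-bounded walk (a sketch, not the in-tree helper):

	static int example_find_cap(struct pci_dev *dev, u8 want)
	{
		int ttl = 48;	/* mirrors PCI_FIND_CAP_TTL in pci.c */
		u8 pos, id;

		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &pos);
		while (ttl-- && pos >= 0x40) {
			pos &= ~3;
			pci_read_config_byte(dev, pos + PCI_CAP_LIST_ID, &id);
			if (id == want)
				return pos;
			/* advance via THIS capability's link byte */
			pci_read_config_byte(dev, pos + PCI_CAP_LIST_NEXT, &pos);
		}
		return 0;
	}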
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8f0322d6f3bf..0a70943f8bb6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -955,7 +955,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho
955 * becomes necessary to do this tweak in two steps -- I've chosen the Host 955 * becomes necessary to do this tweak in two steps -- I've chosen the Host
956 * bridge as trigger. 956 * bridge as trigger.
957 */ 957 */
958static int __initdata asus_hides_smbus; 958static int asus_hides_smbus;
959 959
960static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) 960static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
961{ 961{
@@ -1117,10 +1117,11 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_h
1117static void quirk_sis_96x_smbus(struct pci_dev *dev) 1117static void quirk_sis_96x_smbus(struct pci_dev *dev)
1118{ 1118{
1119 u8 val = 0; 1119 u8 val = 0;
1120 printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
1121 pci_read_config_byte(dev, 0x77, &val);
1122 pci_write_config_byte(dev, 0x77, val & ~0x10);
1123 pci_read_config_byte(dev, 0x77, &val); 1120 pci_read_config_byte(dev, 0x77, &val);
1121 if (val & 0x10) {
1122 printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
1123 pci_write_config_byte(dev, 0x77, val & ~0x10);
1124 }
1124} 1125}
1125 1126
1126/* 1127/*
@@ -1152,11 +1153,12 @@ static void quirk_sis_503(struct pci_dev *dev)
1152 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); 1153 printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
1153 1154
1154 /* 1155 /*
1155 * Ok, it now shows up as a 96x.. The 96x quirks are after 1156 * Ok, it now shows up as a 96x: run the 96x quirk by
1156 * the 503 quirk in the quirk table, so they'll automatically 1157 * hand in case its table entry has already been processed.
1157 * run and enable things like the SMBus device 1158 * (depends on link order, which is apparently not guaranteed)
1158 */ 1159 */
1159 dev->device = devid; 1160 dev->device = devid;
1161 quirk_sis_96x_smbus(dev);
1160} 1162}
1161 1163
1162static void __init quirk_sis_96x_compatible(struct pci_dev *dev) 1164static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
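quirk_sis_96x_smbus() is now idempotent, which is what makes the manual call from quirk_sis_503() safe: it samples the hiding bit and only logs and rewrites when the SMBus device is actually hidden, so a second run is a no-op. That read-test-write shape is the general pattern for any fixup that can be reached twice (register 0x77 / bit 0x10 are the SiS-specific values from the hunk):

	static void example_idempotent_quirk(struct pci_dev *dev)
	{
		u8 val;

		pci_read_config_byte(dev, 0x77, &val);
		if (!(val & 0x10))
			return;		/* nothing hidden: no-op */
		printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
		pci_write_config_byte(dev, 0x77, val & ~0x10);
	}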
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 45f2b20ef513..fab381ed853c 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -193,6 +193,18 @@ static struct pci_dev * pci_find_subsys(unsigned int vendor,
193 struct pci_dev *dev; 193 struct pci_dev *dev;
194 194
195 WARN_ON(in_interrupt()); 195 WARN_ON(in_interrupt());
196
197 /*
198 * pci_find_subsys() can be called on the ide_setup() path, super-early
199 * in boot. But the down_read() will enable local interrupts, which
200 * can cause some machines to crash. So here we detect and flag that
201 * situation and bail out early.
202 */
203 if (unlikely(list_empty(&pci_devices))) {
204 printk(KERN_INFO "pci_find_subsys() called while pci_devices "
205 "is still empty\n");
206 return NULL;
207 }
196 down_read(&pci_bus_sem); 208 down_read(&pci_bus_sem);
197 n = from ? from->global_list.next : pci_devices.next; 209 n = from ? from->global_list.next : pci_devices.next;
198 210
@@ -259,6 +271,18 @@ pci_get_subsys(unsigned int vendor, unsigned int device,
259 struct pci_dev *dev; 271 struct pci_dev *dev;
260 272
261 WARN_ON(in_interrupt()); 273 WARN_ON(in_interrupt());
274
275 /*
276 * pci_get_subsys() can potentially be called by drivers super-early
277 * in boot. But the down_read() will enable local interrupts, which
278 * can cause some machines to crash. So here we detect and flag that
279 * situation and bail out early.
280 */
281 if (unlikely(list_empty(&pci_devices))) {
282 printk(KERN_NOTICE "pci_get_subsys() called while pci_devices "
283 "is still empty\n");
284 return NULL;
285 }
262 down_read(&pci_bus_sem); 286 down_read(&pci_bus_sem);
263 n = from ? from->global_list.next : pci_devices.next; 287 n = from ? from->global_list.next : pci_devices.next;
264 288
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 4f654c901c64..a724ab49a797 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -33,6 +33,8 @@
33 33
34#include <asm/mach/time.h> 34#include <asm/mach/time.h>
35 35
36#include <asm/arch/at91_rtc.h>
37
36 38
37#define AT91_RTC_FREQ 1 39#define AT91_RTC_FREQ 1
38#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ 40#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 1460f6b769f2..e7851e3739ab 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * An I2C driver for the Ricoh RS5C372 RTC 2 * An I2C driver for Ricoh RS5C372 and RV5C38[67] RTCs
3 * 3 *
4 * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net> 4 * Copyright (C) 2005 Pavel Mironchik <pmironchik@optifacio.net>
5 * Copyright (C) 2006 Tower Technologies 5 * Copyright (C) 2006 Tower Technologies
@@ -13,7 +13,7 @@
13#include <linux/rtc.h> 13#include <linux/rtc.h>
14#include <linux/bcd.h> 14#include <linux/bcd.h>
15 15
16#define DRV_VERSION "0.3" 16#define DRV_VERSION "0.4"
17 17
18/* Addresses to scan */ 18/* Addresses to scan */
19static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END }; 19static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
@@ -21,6 +21,13 @@ static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
21/* Insmod parameters */ 21/* Insmod parameters */
22I2C_CLIENT_INSMOD; 22I2C_CLIENT_INSMOD;
23 23
24
25/*
26 * Ricoh has a family of I2C based RTCs, which differ only slightly from
27 * each other. Differences center on pinout (e.g. how many interrupts,
28 * output clock, etc) and how the control registers are used. The '372
29 * is significant only because that's the one this driver first supported.
30 */
24#define RS5C372_REG_SECS 0 31#define RS5C372_REG_SECS 0
25#define RS5C372_REG_MINS 1 32#define RS5C372_REG_MINS 1
26#define RS5C372_REG_HOURS 2 33#define RS5C372_REG_HOURS 2
@@ -29,59 +36,142 @@ I2C_CLIENT_INSMOD;
29#define RS5C372_REG_MONTH 5 36#define RS5C372_REG_MONTH 5
30#define RS5C372_REG_YEAR 6 37#define RS5C372_REG_YEAR 6
31#define RS5C372_REG_TRIM 7 38#define RS5C372_REG_TRIM 7
39# define RS5C372_TRIM_XSL 0x80
40# define RS5C372_TRIM_MASK 0x7F
41
42#define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */
43#define RS5C_REG_ALARM_A_HOURS 9
44#define RS5C_REG_ALARM_A_WDAY 10
45
46#define RS5C_REG_ALARM_B_MIN 11 /* or ALARM_D */
47#define RS5C_REG_ALARM_B_HOURS 12
48#define RS5C_REG_ALARM_B_WDAY 13 /* (ALARM_B only) */
49
50#define RS5C_REG_CTRL1 14
51# define RS5C_CTRL1_AALE (1 << 7) /* or WALE */
52# define RS5C_CTRL1_BALE (1 << 6) /* or DALE */
53# define RV5C387_CTRL1_24 (1 << 5)
54# define RS5C372A_CTRL1_SL1 (1 << 5)
55# define RS5C_CTRL1_CT_MASK (7 << 0)
56# define RS5C_CTRL1_CT0 (0 << 0) /* no periodic irq */
57# define RS5C_CTRL1_CT4 (4 << 0) /* 1 Hz level irq */
58#define RS5C_REG_CTRL2 15
59# define RS5C372_CTRL2_24 (1 << 5)
60# define RS5C_CTRL2_XSTP (1 << 4)
61# define RS5C_CTRL2_CTFG (1 << 2)
62# define RS5C_CTRL2_AAFG (1 << 1) /* or WAFG */
63# define RS5C_CTRL2_BAFG (1 << 0) /* or DAFG */
64
65
66/* to read (style 1) or write registers starting at R */
67#define RS5C_ADDR(R) (((R) << 4) | 0)
68
69
70enum rtc_type {
71 rtc_undef = 0,
72 rtc_rs5c372a,
73 rtc_rs5c372b,
74 rtc_rv5c386,
75 rtc_rv5c387a,
76};
32 77
33#define RS5C372_TRIM_XSL 0x80 78/* REVISIT: this assumes that:
34#define RS5C372_TRIM_MASK 0x7F 79 * - we're in the 21st century, so it's safe to ignore the century
80 * bit for rv5c38[67] (REG_MONTH bit 7);
81 * - we should use ALARM_A not ALARM_B (may be wrong on some boards)
82 */
83struct rs5c372 {
84 struct i2c_client *client;
85 struct rtc_device *rtc;
86 enum rtc_type type;
87 unsigned time24:1;
88 unsigned has_irq:1;
89 char buf[17];
90 char *regs;
91
92 /* on conversion to a "new style" i2c driver, this vanishes */
93 struct i2c_client dev;
94};
35 95
36#define RS5C372_REG_BASE 0 96static int rs5c_get_regs(struct rs5c372 *rs5c)
97{
98 struct i2c_client *client = rs5c->client;
99 struct i2c_msg msgs[] = {
100 { client->addr, I2C_M_RD, sizeof rs5c->buf, rs5c->buf },
101 };
102
103 /* This implements the third reading method from the datasheet, using
104 * an internal address that's reset after each transaction (by STOP)
105 * to 0x0f ... so we read extra registers, and skip the first one.
106 *
107 * The first method doesn't work with the iop3xx adapter driver, on at
108 * least 80219 chips; this works around that bug.
109 */
110 if ((i2c_transfer(client->adapter, msgs, 1)) != 1) {
111 pr_debug("%s: can't read registers\n", rs5c->rtc->name);
112 return -EIO;
113 }
37 114
38static int rs5c372_attach(struct i2c_adapter *adapter); 115 dev_dbg(&client->dev,
39static int rs5c372_detach(struct i2c_client *client); 116 "%02x %02x %02x (%02x) %02x %02x %02x (%02x), "
40static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind); 117 "%02x %02x %02x, %02x %02x %02x; %02x %02x\n",
118 rs5c->regs[0], rs5c->regs[1], rs5c->regs[2], rs5c->regs[3],
119 rs5c->regs[4], rs5c->regs[5], rs5c->regs[6], rs5c->regs[7],
120 rs5c->regs[8], rs5c->regs[9], rs5c->regs[10], rs5c->regs[11],
121 rs5c->regs[12], rs5c->regs[13], rs5c->regs[14], rs5c->regs[15]);
41 122
42struct rs5c372 { 123 return 0;
43 u8 reg_addr; 124}
44 u8 regs[17];
45 struct i2c_msg msg[1];
46 struct i2c_client client;
47 struct rtc_device *rtc;
48};
49 125
50static struct i2c_driver rs5c372_driver = { 126static unsigned rs5c_reg2hr(struct rs5c372 *rs5c, unsigned reg)
51 .driver = { 127{
52 .name = "rs5c372", 128 unsigned hour;
53 },
54 .attach_adapter = &rs5c372_attach,
55 .detach_client = &rs5c372_detach,
56};
57 129
58static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) 130 if (rs5c->time24)
131 return BCD2BIN(reg & 0x3f);
132
133 hour = BCD2BIN(reg & 0x1f);
134 if (hour == 12)
135 hour = 0;
136 if (reg & 0x20)
137 hour += 12;
138 return hour;
139}
140
141static unsigned rs5c_hr2reg(struct rs5c372 *rs5c, unsigned hour)
59{ 142{
143 if (rs5c->time24)
144 return BIN2BCD(hour);
145
146 if (hour > 12)
147 return 0x20 | BIN2BCD(hour - 12);
148 if (hour == 12)
149 return 0x20 | BIN2BCD(12);
150 if (hour == 0)
151 return BIN2BCD(12);
152 return BIN2BCD(hour);
153}
60 154
61 struct rs5c372 *rs5c372 = i2c_get_clientdata(client); 155static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
62 u8 *buf = &(rs5c372->regs[1]); 156{
157 struct rs5c372 *rs5c = i2c_get_clientdata(client);
158 int status = rs5c_get_regs(rs5c);
63 159
64 /* this implements the 3rd reading method, according 160 if (status < 0)
65 * to the datasheet. rs5c372 defaults to internal 161 return status;
66 * address 0xF, so 0x0 is in regs[1]
67 */
68 162
69 if ((i2c_transfer(client->adapter, rs5c372->msg, 1)) != 1) { 163 tm->tm_sec = BCD2BIN(rs5c->regs[RS5C372_REG_SECS] & 0x7f);
70 dev_err(&client->dev, "%s: read error\n", __FUNCTION__); 164 tm->tm_min = BCD2BIN(rs5c->regs[RS5C372_REG_MINS] & 0x7f);
71 return -EIO; 165 tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]);
72 }
73 166
74 tm->tm_sec = BCD2BIN(buf[RS5C372_REG_SECS] & 0x7f); 167 tm->tm_wday = BCD2BIN(rs5c->regs[RS5C372_REG_WDAY] & 0x07);
75 tm->tm_min = BCD2BIN(buf[RS5C372_REG_MINS] & 0x7f); 168 tm->tm_mday = BCD2BIN(rs5c->regs[RS5C372_REG_DAY] & 0x3f);
76 tm->tm_hour = BCD2BIN(buf[RS5C372_REG_HOURS] & 0x3f);
77 tm->tm_wday = BCD2BIN(buf[RS5C372_REG_WDAY] & 0x07);
78 tm->tm_mday = BCD2BIN(buf[RS5C372_REG_DAY] & 0x3f);
79 169
80 /* tm->tm_mon is zero-based */ 170 /* tm->tm_mon is zero-based */
81 tm->tm_mon = BCD2BIN(buf[RS5C372_REG_MONTH] & 0x1f) - 1; 171 tm->tm_mon = BCD2BIN(rs5c->regs[RS5C372_REG_MONTH] & 0x1f) - 1;
82 172
83 /* year is 1900 + tm->tm_year */ 173 /* year is 1900 + tm->tm_year */
84 tm->tm_year = BCD2BIN(buf[RS5C372_REG_YEAR]) + 100; 174 tm->tm_year = BCD2BIN(rs5c->regs[RS5C372_REG_YEAR]) + 100;
85 175
86 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " 176 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
87 "mday=%d, mon=%d, year=%d, wday=%d\n", 177 "mday=%d, mon=%d, year=%d, wday=%d\n",
@@ -89,22 +179,25 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
89 tm->tm_sec, tm->tm_min, tm->tm_hour, 179 tm->tm_sec, tm->tm_min, tm->tm_hour,
90 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); 180 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
91 181
92 return 0; 182 /* rtc might need initialization */
183 return rtc_valid_tm(tm);
93} 184}
94 185
95static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) 186static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
96{ 187{
97 unsigned char buf[8] = { RS5C372_REG_BASE }; 188 struct rs5c372 *rs5c = i2c_get_clientdata(client);
189 unsigned char buf[8];
98 190
99 dev_dbg(&client->dev, 191 dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
100 "%s: secs=%d, mins=%d, hours=%d "
101 "mday=%d, mon=%d, year=%d, wday=%d\n", 192 "mday=%d, mon=%d, year=%d, wday=%d\n",
102 __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour, 193 __FUNCTION__,
194 tm->tm_sec, tm->tm_min, tm->tm_hour,
103 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); 195 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
104 196
197 buf[0] = RS5C_ADDR(RS5C372_REG_SECS);
105 buf[1] = BIN2BCD(tm->tm_sec); 198 buf[1] = BIN2BCD(tm->tm_sec);
106 buf[2] = BIN2BCD(tm->tm_min); 199 buf[2] = BIN2BCD(tm->tm_min);
107 buf[3] = BIN2BCD(tm->tm_hour); 200 buf[3] = rs5c_hr2reg(rs5c, tm->tm_hour);
108 buf[4] = BIN2BCD(tm->tm_wday); 201 buf[4] = BIN2BCD(tm->tm_wday);
109 buf[5] = BIN2BCD(tm->tm_mday); 202 buf[5] = BIN2BCD(tm->tm_mday);
110 buf[6] = BIN2BCD(tm->tm_mon + 1); 203 buf[6] = BIN2BCD(tm->tm_mon + 1);
@@ -118,21 +211,43 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
118 return 0; 211 return 0;
119} 212}
120 213
214#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
215#define NEED_TRIM
216#endif
217
218#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
219#define NEED_TRIM
220#endif
221
222#ifdef NEED_TRIM
121static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim) 223static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
122{ 224{
123 struct rs5c372 *rs5c372 = i2c_get_clientdata(client); 225 struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
124 u8 tmp = rs5c372->regs[RS5C372_REG_TRIM + 1]; 226 u8 tmp = rs5c372->regs[RS5C372_REG_TRIM];
125 227
126 if (osc) 228 if (osc)
127 *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768; 229 *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
128 230
129 if (trim) { 231 if (trim) {
130 *trim = tmp & RS5C372_TRIM_MASK; 232 dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, tmp);
131 dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim); 233 tmp &= RS5C372_TRIM_MASK;
234 if (tmp & 0x3e) {
235 int t = tmp & 0x3f;
236
237 if (tmp & 0x40)
238 t = (~t | (s8)0xc0) + 1;
239 else
240 t = t - 1;
241
242 tmp = t * 2;
243 } else
244 tmp = 0;
245 *trim = tmp;
132 } 246 }
133 247
134 return 0; 248 return 0;
135} 249}
250#endif
136 251
137static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm) 252static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm)
138{ 253{
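The trim decode above is denser than it looks: bit 6 of the register is a sign bit, the field only counts when one of bits 1..5 is set, and the sign-extended step is doubled before being reported. Two worked values (illustrative only):

	/* rs5c372_get_trim() decode:
	 *   raw 0x07: 0x07 & 0x3e != 0, sign clear -> t = 7 - 1 = 6 -> trim 12
	 *   raw 0x41: 0x41 & 0x3e == 0             -> reported as 0 (no trim)
	 * The sign branch ((~t | (s8)0xc0) + 1) yields a negative step
	 * count; note it still travels through the u8 `tmp` on its way
	 * to *trim.
	 */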
@@ -144,25 +259,190 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
144 return rs5c372_set_datetime(to_i2c_client(dev), tm); 259 return rs5c372_set_datetime(to_i2c_client(dev), tm);
145} 260}
146 261
262#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
263
264static int
265rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
266{
267 struct i2c_client *client = to_i2c_client(dev);
268 struct rs5c372 *rs5c = i2c_get_clientdata(client);
269 unsigned char buf[2];
270 int status;
271
272 buf[1] = rs5c->regs[RS5C_REG_CTRL1];
273 switch (cmd) {
274 case RTC_UIE_OFF:
275 case RTC_UIE_ON:
276 /* some 372a modes use a different IRQ pin for 1Hz irqs */
277 if (rs5c->type == rtc_rs5c372a
278 && (buf[1] & RS5C372A_CTRL1_SL1))
279 return -ENOIOCTLCMD;
280 case RTC_AIE_OFF:
281 case RTC_AIE_ON:
282 /* these irq management calls only make sense for chips
283 * which are wired up to an IRQ.
284 */
285 if (!rs5c->has_irq)
286 return -ENOIOCTLCMD;
287 break;
288 default:
289 return -ENOIOCTLCMD;
290 }
291
292 status = rs5c_get_regs(rs5c);
293 if (status < 0)
294 return status;
295
296 buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
297 switch (cmd) {
298 case RTC_AIE_OFF: /* alarm off */
299 buf[1] &= ~RS5C_CTRL1_AALE;
300 break;
301 case RTC_AIE_ON: /* alarm on */
302 buf[1] |= RS5C_CTRL1_AALE;
303 break;
304 case RTC_UIE_OFF: /* update off */
305 buf[1] &= ~RS5C_CTRL1_CT_MASK;
306 break;
307 case RTC_UIE_ON: /* update on */
308 buf[1] &= ~RS5C_CTRL1_CT_MASK;
309 buf[1] |= RS5C_CTRL1_CT4;
310 break;
311 }
312 if ((i2c_master_send(client, buf, 2)) != 2) {
313 printk(KERN_WARNING "%s: can't update alarm\n",
314 rs5c->rtc->name);
315 status = -EIO;
316 } else
317 rs5c->regs[RS5C_REG_CTRL1] = buf[1];
318 return status;
319}
320
321#else
322#define rs5c_rtc_ioctl NULL
323#endif
324
325
326/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI,
327 * which only exposes a polled programming interface, and since
328 * these calls map directly to those EFI requests, we don't require
329 * an IRQ for this chip when we go through this API.
330 *
331 * The older x86_pc derived RTC_ALM_{READ,SET} calls require irqs
332 * though, managed through RTC_AIE_{ON,OFF} requests.
333 */
334
335static int rs5c_read_alarm(struct device *dev, struct rtc_wkalrm *t)
336{
337 struct i2c_client *client = to_i2c_client(dev);
338 struct rs5c372 *rs5c = i2c_get_clientdata(client);
339 int status;
340
341 status = rs5c_get_regs(rs5c);
342 if (status < 0)
343 return status;
344
345 /* report alarm time */
346 t->time.tm_sec = 0;
347 t->time.tm_min = BCD2BIN(rs5c->regs[RS5C_REG_ALARM_A_MIN] & 0x7f);
348 t->time.tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C_REG_ALARM_A_HOURS]);
349 t->time.tm_mday = -1;
350 t->time.tm_mon = -1;
351 t->time.tm_year = -1;
352 t->time.tm_wday = -1;
353 t->time.tm_yday = -1;
354 t->time.tm_isdst = -1;
355
356 /* ... and status */
357 t->enabled = !!(rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE);
358 t->pending = !!(rs5c->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_AAFG);
359
360 return 0;
361}
362
363static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
364{
365 struct i2c_client *client = to_i2c_client(dev);
366 struct rs5c372 *rs5c = i2c_get_clientdata(client);
367 int status;
368 unsigned char buf[4];
369
370 /* only handle up to 24 hours in the future, like RTC_ALM_SET */
371 if (t->time.tm_mday != -1
372 || t->time.tm_mon != -1
373 || t->time.tm_year != -1)
374 return -EINVAL;
375
376 /* REVISIT: round up tm_sec */
377
378 /* if needed, disable irq (clears pending status) */
379 status = rs5c_get_regs(rs5c);
380 if (status < 0)
381 return status;
382 if (rs5c->regs[RS5C_REG_CTRL1] & RS5C_CTRL1_AALE) {
383 buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
384 buf[1] = rs5c->regs[RS5C_REG_CTRL1] & ~RS5C_CTRL1_AALE;
385 if (i2c_master_send(client, buf, 2) != 2) {
386 pr_debug("%s: can't disable alarm\n", rs5c->rtc->name);
387 return -EIO;
388 }
389 rs5c->regs[RS5C_REG_CTRL1] = buf[1];
390 }
391
392 /* set alarm */
393 buf[0] = RS5C_ADDR(RS5C_REG_ALARM_A_MIN);
394 buf[1] = BIN2BCD(t->time.tm_min);
395 buf[2] = rs5c_hr2reg(rs5c, t->time.tm_hour);
396 buf[3] = 0x7f; /* any/all days */
397 if ((i2c_master_send(client, buf, 4)) != 4) {
398 pr_debug("%s: can't set alarm time\n", rs5c->rtc->name);
399 return -EIO;
400 }
401
402 /* ... and maybe enable its irq */
403 if (t->enabled) {
404 buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
405 buf[1] = rs5c->regs[RS5C_REG_CTRL1] | RS5C_CTRL1_AALE;
406 if ((i2c_master_send(client, buf, 2)) != 2)
407 printk(KERN_WARNING "%s: can't enable alarm\n",
408 rs5c->rtc->name);
409 rs5c->regs[RS5C_REG_CTRL1] = buf[1];
410 }
411
412 return 0;
413}
414
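The alarm paths above lean on the BIN2BCD/BCD2BIN macros from <linux/bcd.h>; as a self-contained sketch, the two-digit packed-BCD conversions they perform are:

	/* classic packed-BCD helpers, equivalent to the kernel's macros */
	static inline unsigned bcd2bin(unsigned char val)
	{
		return (val & 0x0f) + (val >> 4) * 10;
	}

	static inline unsigned char bin2bcd(unsigned val)
	{
		return ((val / 10) << 4) | (val % 10);
	}
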
415#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
416
 static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
 {
 	int err, osc, trim;
 
 	err = rs5c372_get_trim(to_i2c_client(dev), &osc, &trim);
 	if (err == 0) {
-		seq_printf(seq, "%d.%03d KHz\n", osc / 1000, osc % 1000);
-		seq_printf(seq, "trim\t: %d\n", trim);
+		seq_printf(seq, "crystal\t\t: %d.%03d KHz\n",
+				osc / 1000, osc % 1000);
+		seq_printf(seq, "trim\t\t: %d\n", trim);
 	}
 
 	return 0;
 }
159 430
431#else
432#define rs5c372_rtc_proc NULL
433#endif
434
160static const struct rtc_class_ops rs5c372_rtc_ops = { 435static const struct rtc_class_ops rs5c372_rtc_ops = {
161 .proc = rs5c372_rtc_proc, 436 .proc = rs5c372_rtc_proc,
437 .ioctl = rs5c_rtc_ioctl,
162 .read_time = rs5c372_rtc_read_time, 438 .read_time = rs5c372_rtc_read_time,
163 .set_time = rs5c372_rtc_set_time, 439 .set_time = rs5c372_rtc_set_time,
440 .read_alarm = rs5c_read_alarm,
441 .set_alarm = rs5c_set_alarm,
164}; 442};
165 443
444#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
445
166static ssize_t rs5c372_sysfs_show_trim(struct device *dev, 446static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
167 struct device_attribute *attr, char *buf) 447 struct device_attribute *attr, char *buf)
168{ 448{
@@ -172,7 +452,7 @@ static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
172 if (err) 452 if (err)
173 return err; 453 return err;
174 454
175 return sprintf(buf, "0x%2x\n", trim); 455 return sprintf(buf, "%d\n", trim);
176} 456}
177static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL); 457static DEVICE_ATTR(trim, S_IRUGO, rs5c372_sysfs_show_trim, NULL);
178 458
@@ -189,16 +469,35 @@ static ssize_t rs5c372_sysfs_show_osc(struct device *dev,
189} 469}
190static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL); 470static DEVICE_ATTR(osc, S_IRUGO, rs5c372_sysfs_show_osc, NULL);
191 471
192static int rs5c372_attach(struct i2c_adapter *adapter) 472static int rs5c_sysfs_register(struct device *dev)
193{ 473{
194 return i2c_probe(adapter, &addr_data, rs5c372_probe); 474 int err;
475
476 err = device_create_file(dev, &dev_attr_trim);
477 if (err)
478 return err;
479 err = device_create_file(dev, &dev_attr_osc);
480 if (err)
481 device_remove_file(dev, &dev_attr_trim);
482
483 return err;
484}
485
486#else
487static int rs5c_sysfs_register(struct device *dev)
488{
489 return 0;
195} 490}
491#endif /* SYSFS */
492
493static struct i2c_driver rs5c372_driver;
196 494
197static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) 495static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
198{ 496{
199 int err = 0; 497 int err = 0;
200 struct i2c_client *client; 498 struct i2c_client *client;
201 struct rs5c372 *rs5c372; 499 struct rs5c372 *rs5c372;
500 struct rtc_time tm;
202 501
203 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__); 502 dev_dbg(adapter->class_dev.dev, "%s\n", __FUNCTION__);
204 503
@@ -211,7 +510,15 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
211 err = -ENOMEM; 510 err = -ENOMEM;
212 goto exit; 511 goto exit;
213 } 512 }
214 client = &rs5c372->client; 513
514 /* we read registers 0x0f then 0x00-0x0f; skip the first one */
515 rs5c372->regs = &rs5c372->buf[1];
516
517 /* On conversion to a "new style" i2c driver, we'll be handed
518 * the i2c_client (we won't create it)
519 */
520 client = &rs5c372->dev;
521 rs5c372->client = client;
215 522
216 /* I2C client */ 523 /* I2C client */
217 client->addr = address; 524 client->addr = address;
@@ -222,16 +529,99 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
222 529
223 i2c_set_clientdata(client, rs5c372); 530 i2c_set_clientdata(client, rs5c372);
224 531
225 rs5c372->msg[0].addr = address;
226 rs5c372->msg[0].flags = I2C_M_RD;
227 rs5c372->msg[0].len = sizeof(rs5c372->regs);
228 rs5c372->msg[0].buf = rs5c372->regs;
229
230 /* Inform the i2c layer */ 532 /* Inform the i2c layer */
231 if ((err = i2c_attach_client(client))) 533 if ((err = i2c_attach_client(client)))
232 goto exit_kfree; 534 goto exit_kfree;
233 535
234 dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n"); 536 err = rs5c_get_regs(rs5c372);
537 if (err < 0)
538 goto exit_detach;
539
540 /* For "new style" drivers, irq is in i2c_client and chip type
541 * info comes from i2c_client.dev.platform_data. Meanwhile:
542 *
543 * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE
544 */
545 if (rs5c372->type == rtc_undef) {
546 rs5c372->type = rtc_rs5c372b;
547 dev_warn(&client->dev, "assuming rs5c372b\n");
548 }
549
550 /* clock may be set for am/pm or 24 hr time */
551 switch (rs5c372->type) {
552 case rtc_rs5c372a:
553 case rtc_rs5c372b:
554 /* alarm uses ALARM_A; and nINTRA on 372a, nINTR on 372b.
555 * so does periodic irq, except some 372a modes.
556 */
557 if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C372_CTRL2_24)
558 rs5c372->time24 = 1;
559 break;
560 case rtc_rv5c386:
561 case rtc_rv5c387a:
562 if (rs5c372->regs[RS5C_REG_CTRL1] & RV5C387_CTRL1_24)
563 rs5c372->time24 = 1;
564 /* alarm uses ALARM_W; and nINTRB for alarm and periodic
565 * irq, on both 386 and 387
566 */
567 break;
568 default:
569 dev_err(&client->dev, "unknown RTC type\n");
570 goto exit_detach;
571 }
572
573 /* if the oscillator lost power and no other software (like
574 * the bootloader) set it up, do it here.
575 */
576 if (rs5c372->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_XSTP) {
577 unsigned char buf[3];
578
579 rs5c372->regs[RS5C_REG_CTRL2] &= ~RS5C_CTRL2_XSTP;
580
581 buf[0] = RS5C_ADDR(RS5C_REG_CTRL1);
582 buf[1] = rs5c372->regs[RS5C_REG_CTRL1];
583 buf[2] = rs5c372->regs[RS5C_REG_CTRL2];
584
585 /* use 24hr mode */
586 switch (rs5c372->type) {
587 case rtc_rs5c372a:
588 case rtc_rs5c372b:
589 buf[2] |= RS5C372_CTRL2_24;
590 rs5c372->time24 = 1;
591 break;
592 case rtc_rv5c386:
593 case rtc_rv5c387a:
594 buf[1] |= RV5C387_CTRL1_24;
595 rs5c372->time24 = 1;
596 break;
597 default:
598 /* impossible */
599 break;
600 }
601
602 if ((i2c_master_send(client, buf, 3)) != 3) {
603 dev_err(&client->dev, "setup error\n");
604 goto exit_detach;
605 }
606 rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
607 rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
608 }
609
610 if (rs5c372_get_datetime(client, &tm) < 0)
611 dev_warn(&client->dev, "clock needs to be set\n");
612
613 dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n",
614 ({ char *s; switch (rs5c372->type) {
615 case rtc_rs5c372a: s = "rs5c372a"; break;
616 case rtc_rs5c372b: s = "rs5c372b"; break;
617 case rtc_rv5c386: s = "rv5c386"; break;
618 case rtc_rv5c387a: s = "rv5c387a"; break;
619 default: s = "chip"; break;
620 }; s;}),
621 rs5c372->time24 ? "24hr" : "am/pm"
622 );
623
624 /* FIXME when client->irq exists, use it to register alarm irq */
235 625
236 rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name, 626 rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
237 &client->dev, &rs5c372_rtc_ops, THIS_MODULE); 627 &client->dev, &rs5c372_rtc_ops, THIS_MODULE);
@@ -241,18 +631,12 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
241 goto exit_detach; 631 goto exit_detach;
242 } 632 }
243 633
244 err = device_create_file(&client->dev, &dev_attr_trim); 634 err = rs5c_sysfs_register(&client->dev);
245 if (err) 635 if (err)
246 goto exit_devreg; 636 goto exit_devreg;
247 err = device_create_file(&client->dev, &dev_attr_osc);
248 if (err)
249 goto exit_trim;
250 637
251 return 0; 638 return 0;
252 639
253exit_trim:
254 device_remove_file(&client->dev, &dev_attr_trim);
255
256exit_devreg: 640exit_devreg:
257 rtc_device_unregister(rs5c372->rtc); 641 rtc_device_unregister(rs5c372->rtc);
258 642
@@ -266,6 +650,11 @@ exit:
266 return err; 650 return err;
267} 651}
268 652
653static int rs5c372_attach(struct i2c_adapter *adapter)
654{
655 return i2c_probe(adapter, &addr_data, rs5c372_probe);
656}
657
269static int rs5c372_detach(struct i2c_client *client) 658static int rs5c372_detach(struct i2c_client *client)
270{ 659{
271 int err; 660 int err;
@@ -274,6 +663,8 @@ static int rs5c372_detach(struct i2c_client *client)
274 if (rs5c372->rtc) 663 if (rs5c372->rtc)
275 rtc_device_unregister(rs5c372->rtc); 664 rtc_device_unregister(rs5c372->rtc);
276 665
666 /* REVISIT properly destroy the sysfs files ... */
667
277 if ((err = i2c_detach_client(client))) 668 if ((err = i2c_detach_client(client)))
278 return err; 669 return err;
279 670
@@ -281,6 +672,14 @@ static int rs5c372_detach(struct i2c_client *client)
281 return 0; 672 return 0;
282} 673}
283 674
675static struct i2c_driver rs5c372_driver = {
676 .driver = {
677 .name = "rtc-rs5c372",
678 },
679 .attach_adapter = &rs5c372_attach,
680 .detach_client = &rs5c372_detach,
681};
682
284static __init int rs5c372_init(void) 683static __init int rs5c372_init(void)
285{ 684{
286 return i2c_add_driver(&rs5c372_driver); 685 return i2c_add_driver(&rs5c372_driver);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 72ba1a70f35f..e9e0934380b8 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -264,8 +264,6 @@ static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
264 unsigned int tmp; 264 unsigned int tmp;
265 265
266 tmp = readb(rtc->regbase + RCR1); 266 tmp = readb(rtc->regbase + RCR1);
267 seq_printf(seq, "alarm_IRQ\t: %s\n",
268 (tmp & RCR1_AIE) ? "yes" : "no");
269 seq_printf(seq, "carry_IRQ\t: %s\n", 267 seq_printf(seq, "carry_IRQ\t: %s\n",
270 (tmp & RCR1_CIE) ? "yes" : "no"); 268 (tmp & RCR1_CIE) ? "yes" : "no");
271 269
@@ -428,6 +426,8 @@ static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
428 tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */ 426 tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */
429 tm->tm_year = 0xffff; 427 tm->tm_year = 0xffff;
430 428
429 wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;
430
431 spin_unlock_irq(&rtc->lock); 431 spin_unlock_irq(&rtc->lock);
432 432
433 return 0; 433 return 0;
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 1678b6c757ec..a420cd099041 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -117,7 +117,7 @@ vmcp_write(struct file *file, const char __user * buff, size_t count,
117 return -ENOMEM; 117 return -ENOMEM;
118 } 118 }
119 debug_text_event(vmcp_debug, 1, cmd); 119 debug_text_event(vmcp_debug, 1, cmd);
120 session->resp_size = __cpcmd(cmd, session->response, 120 session->resp_size = cpcmd(cmd, session->response,
121 session->bufsize, 121 session->bufsize,
122 &session->resp_code); 122 &session->resp_code);
123 up(&session->mutex); 123 up(&session->mutex);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b471ac4a1bf6..ae1bf231d089 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -880,19 +880,15 @@ static void cio_reset_pgm_check_handler(void)
880static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) 880static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
881{ 881{
882 int rc; 882 int rc;
883 register struct subchannel_id reg1 asm ("1") = schid;
884 883
885 pgm_check_occured = 0; 884 pgm_check_occured = 0;
886 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 885 s390_reset_pgm_handler = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL;
887 888
888 asm volatile( 889 /* The program check handler could have changed pgm_check_occured */
889 " stsch 0(%2)\n" 890 barrier();
890 " ipm %0\n"
891 " srl %0,28"
892 : "=d" (rc)
893 : "d" (reg1), "a" (addr), "m" (*addr) : "memory", "cc");
894 891
895 s390_reset_pgm_handler = NULL;
896 if (pgm_check_occured) 892 if (pgm_check_occured)
897 return -EIO; 893 return -EIO;
898 else 894 else
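The rewrite above works because barrier() stops the compiler from assuming pgm_check_occured is unchanged across the stsch() call; a standalone sketch of that pattern, with illustrative names (not the kernel's):

	#define barrier() __asm__ __volatile__("" ::: "memory")

	static int fault_occurred;	/* set behind the compiler's back */

	static int checked_probe(void (*probe)(void))
	{
		fault_occurred = 0;
		probe();		/* a trap handler may set the flag here */
		barrier();		/* force a fresh load of fault_occurred */
		return fault_occurred ? -1 : 0;
	}
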
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 1a93fa684e9f..52625153a4f0 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -27,10 +27,7 @@ config IUCV
27 help 27 help
28 Select this option if you want to use inter-user communication 28 Select this option if you want to use inter-user communication
29 under VM or VIF. If unsure, say "Y" to enable a fast communication 29 under VM or VIF. If unsure, say "Y" to enable a fast communication
30 link between VM guests. At boot time the user ID of the guest needs 30 link between VM guests.
31 to be passed to the kernel. Note that both kernels need to be
32 compiled with this option and both need to be booted with the user ID
33 of the other VM guest.
34 31
35config NETIUCV 32config NETIUCV
36 tristate "IUCV network device support (VM only)" 33 tristate "IUCV network device support (VM only)"
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 53c358c7d368..e95c281f1e36 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -710,7 +710,7 @@ struct qeth_reply {
710 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); 710 int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
711 u32 seqno; 711 u32 seqno;
712 unsigned long offset; 712 unsigned long offset;
713 int received; 713 atomic_t received;
714 int rc; 714 int rc;
715 void *param; 715 void *param;
716 struct qeth_card *card; 716 struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 2bde4f1fb9c2..d2efa5ff125d 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -471,7 +471,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
471 channel->state == CH_STATE_UP) 471 channel->state == CH_STATE_UP)
472 qeth_issue_next_read(card); 472 qeth_issue_next_read(card);
473 473
474 tasklet_schedule(&channel->irq_tasklet); 474 qeth_irq_tasklet((unsigned long)channel);
475 return; 475 return;
476out: 476out:
477 wake_up(&card->wait_q); 477 wake_up(&card->wait_q);
@@ -951,40 +951,6 @@ qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
951} 951}
952 952
953static int 953static int
954qeth_register_ip_addresses(void *ptr)
955{
956 struct qeth_card *card;
957
958 card = (struct qeth_card *) ptr;
959 daemonize("qeth_reg_ip");
960 QETH_DBF_TEXT(trace,4,"regipth1");
961 if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
962 return 0;
963 QETH_DBF_TEXT(trace,4,"regipth2");
964 qeth_set_ip_addr_list(card);
965 qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
966 return 0;
967}
968
969/*
970 * Drive the SET_PROMISC_MODE thread
971 */
972static int
973qeth_set_promisc_mode(void *ptr)
974{
975 struct qeth_card *card = (struct qeth_card *) ptr;
976
977 daemonize("qeth_setprm");
978 QETH_DBF_TEXT(trace,4,"setprm1");
979 if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD))
980 return 0;
981 QETH_DBF_TEXT(trace,4,"setprm2");
982 qeth_setadp_promisc_mode(card);
983 qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD);
984 return 0;
985}
986
987static int
988qeth_recover(void *ptr) 954qeth_recover(void *ptr)
989{ 955{
990 struct qeth_card *card; 956 struct qeth_card *card;
@@ -1047,11 +1013,6 @@ qeth_start_kernel_thread(struct work_struct *work)
1047 if (card->read.state != CH_STATE_UP && 1013 if (card->read.state != CH_STATE_UP &&
1048 card->write.state != CH_STATE_UP) 1014 card->write.state != CH_STATE_UP)
1049 return; 1015 return;
1050
1051 if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
1052 kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
1053 if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD))
1054 kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD);
1055 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) 1016 if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
1056 kernel_thread(qeth_recover, (void *) card, SIGCHLD); 1017 kernel_thread(qeth_recover, (void *) card, SIGCHLD);
1057} 1018}
@@ -1074,7 +1035,7 @@ qeth_set_intial_options(struct qeth_card *card)
1074 card->options.layer2 = 1; 1035 card->options.layer2 = 1;
1075 else 1036 else
1076 card->options.layer2 = 0; 1037 card->options.layer2 = 0;
1077 card->options.performance_stats = 1; 1038 card->options.performance_stats = 0;
1078} 1039}
1079 1040
1080/** 1041/**
@@ -1613,8 +1574,6 @@ qeth_issue_next_read(struct qeth_card *card)
1613 return -ENOMEM; 1574 return -ENOMEM;
1614 } 1575 }
1615 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); 1576 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1616 wait_event(card->wait_q,
1617 atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
1618 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1577 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1619 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, 1578 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1620 (addr_t) iob, 0, 0); 1579 (addr_t) iob, 0, 0);
@@ -1635,6 +1594,7 @@ qeth_alloc_reply(struct qeth_card *card)
1635 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); 1594 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
1636 if (reply){ 1595 if (reply){
1637 atomic_set(&reply->refcnt, 1); 1596 atomic_set(&reply->refcnt, 1);
1597 atomic_set(&reply->received, 0);
1638 reply->card = card; 1598 reply->card = card;
1639 }; 1599 };
1640 return reply; 1600 return reply;
@@ -1655,31 +1615,6 @@ qeth_put_reply(struct qeth_reply *reply)
1655 kfree(reply); 1615 kfree(reply);
1656} 1616}
1657 1617
1658static void
1659qeth_cmd_timeout(unsigned long data)
1660{
1661 struct qeth_reply *reply, *list_reply, *r;
1662 unsigned long flags;
1663
1664 reply = (struct qeth_reply *) data;
1665 spin_lock_irqsave(&reply->card->lock, flags);
1666 list_for_each_entry_safe(list_reply, r,
1667 &reply->card->cmd_waiter_list, list) {
1668 if (reply == list_reply){
1669 qeth_get_reply(reply);
1670 list_del_init(&reply->list);
1671 spin_unlock_irqrestore(&reply->card->lock, flags);
1672 reply->rc = -ETIME;
1673 reply->received = 1;
1674 wake_up(&reply->wait_q);
1675 qeth_put_reply(reply);
1676 return;
1677 }
1678 }
1679 spin_unlock_irqrestore(&reply->card->lock, flags);
1680}
1681
1682
1683static struct qeth_ipa_cmd * 1618static struct qeth_ipa_cmd *
1684qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) 1619qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1685{ 1620{
@@ -1745,7 +1680,7 @@ qeth_clear_ipacmd_list(struct qeth_card *card)
1745 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { 1680 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
1746 qeth_get_reply(reply); 1681 qeth_get_reply(reply);
1747 reply->rc = -EIO; 1682 reply->rc = -EIO;
1748 reply->received = 1; 1683 atomic_inc(&reply->received);
1749 list_del_init(&reply->list); 1684 list_del_init(&reply->list);
1750 wake_up(&reply->wait_q); 1685 wake_up(&reply->wait_q);
1751 qeth_put_reply(reply); 1686 qeth_put_reply(reply);
@@ -1814,7 +1749,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel,
1814 &card->cmd_waiter_list); 1749 &card->cmd_waiter_list);
1815 spin_unlock_irqrestore(&card->lock, flags); 1750 spin_unlock_irqrestore(&card->lock, flags);
1816 } else { 1751 } else {
1817 reply->received = 1; 1752 atomic_inc(&reply->received);
1818 wake_up(&reply->wait_q); 1753 wake_up(&reply->wait_q);
1819 } 1754 }
1820 qeth_put_reply(reply); 1755 qeth_put_reply(reply);
@@ -1858,7 +1793,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
1858 int rc; 1793 int rc;
1859 unsigned long flags; 1794 unsigned long flags;
1860 struct qeth_reply *reply = NULL; 1795 struct qeth_reply *reply = NULL;
1861 struct timer_list timer; 1796 unsigned long timeout;
1862 1797
1863 QETH_DBF_TEXT(trace, 2, "sendctl"); 1798 QETH_DBF_TEXT(trace, 2, "sendctl");
1864 1799
@@ -1873,21 +1808,20 @@ qeth_send_control_data(struct qeth_card *card, int len,
1873 reply->seqno = QETH_IDX_COMMAND_SEQNO; 1808 reply->seqno = QETH_IDX_COMMAND_SEQNO;
1874 else 1809 else
1875 reply->seqno = card->seqno.ipa++; 1810 reply->seqno = card->seqno.ipa++;
1876 init_timer(&timer);
1877 timer.function = qeth_cmd_timeout;
1878 timer.data = (unsigned long) reply;
1879 init_waitqueue_head(&reply->wait_q); 1811 init_waitqueue_head(&reply->wait_q);
1880 spin_lock_irqsave(&card->lock, flags); 1812 spin_lock_irqsave(&card->lock, flags);
1881 list_add_tail(&reply->list, &card->cmd_waiter_list); 1813 list_add_tail(&reply->list, &card->cmd_waiter_list);
1882 spin_unlock_irqrestore(&card->lock, flags); 1814 spin_unlock_irqrestore(&card->lock, flags);
1883 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1815 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1884 wait_event(card->wait_q, 1816
1885 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0); 1817 while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
1886 qeth_prepare_control_data(card, len, iob); 1818 qeth_prepare_control_data(card, len, iob);
1819
1887 if (IS_IPA(iob->data)) 1820 if (IS_IPA(iob->data))
1888 timer.expires = jiffies + QETH_IPA_TIMEOUT; 1821 timeout = jiffies + QETH_IPA_TIMEOUT;
1889 else 1822 else
1890 timer.expires = jiffies + QETH_TIMEOUT; 1823 timeout = jiffies + QETH_TIMEOUT;
1824
1891 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1825 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1892 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1826 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
1893 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, 1827 rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
@@ -1906,9 +1840,16 @@ qeth_send_control_data(struct qeth_card *card, int len,
1906 wake_up(&card->wait_q); 1840 wake_up(&card->wait_q);
1907 return rc; 1841 return rc;
1908 } 1842 }
1909 add_timer(&timer); 1843 while (!atomic_read(&reply->received)) {
1910 wait_event(reply->wait_q, reply->received); 1844 if (time_after(jiffies, timeout)) {
1911 del_timer_sync(&timer); 1845 spin_lock_irqsave(&reply->card->lock, flags);
1846 list_del_init(&reply->list);
1847 spin_unlock_irqrestore(&reply->card->lock, flags);
1848 reply->rc = -ETIME;
1849 atomic_inc(&reply->received);
1850 wake_up(&reply->wait_q);
1851 }
1852 };
1912 rc = reply->rc; 1853 rc = reply->rc;
1913 qeth_put_reply(reply); 1854 qeth_put_reply(reply);
1914 return rc; 1855 return rc;
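The timer-based timeout is gone; a reply now carries an atomic "received" flag that either the reply callback or the deadline check trips. A distilled sketch of that pattern, with illustrative names (header paths assume a current tree):

	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct demo_reply {
		atomic_t received;
		int rc;
	};

	static int demo_wait(struct demo_reply *reply, unsigned long deadline)
	{
		while (!atomic_read(&reply->received)) {
			if (time_after(jiffies, deadline)) {
				reply->rc = -ETIME;
				atomic_inc(&reply->received);
			}
		}
		return reply->rc;
	}
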
@@ -2466,32 +2407,17 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb,
2466 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); 2407 qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
2467} 2408}
2468 2409
2469static inline __u16 2410static inline void
2470qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2411qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2471 struct qeth_hdr *hdr) 2412 struct qeth_hdr *hdr)
2472{ 2413{
2473 unsigned short vlan_id = 0;
2474#ifdef CONFIG_QETH_VLAN
2475 struct vlan_hdr *vhdr;
2476#endif
2477
2478 skb->pkt_type = PACKET_HOST; 2414 skb->pkt_type = PACKET_HOST;
2479 skb->protocol = qeth_type_trans(skb, skb->dev); 2415 skb->protocol = qeth_type_trans(skb, skb->dev);
2480 if (card->options.checksum_type == NO_CHECKSUMMING) 2416 if (card->options.checksum_type == NO_CHECKSUMMING)
2481 skb->ip_summed = CHECKSUM_UNNECESSARY; 2417 skb->ip_summed = CHECKSUM_UNNECESSARY;
2482 else 2418 else
2483 skb->ip_summed = CHECKSUM_NONE; 2419 skb->ip_summed = CHECKSUM_NONE;
2484#ifdef CONFIG_QETH_VLAN
2485 if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
2486 vhdr = (struct vlan_hdr *) skb->data;
2487 skb->protocol =
2488 __constant_htons(vhdr->h_vlan_encapsulated_proto);
2489 vlan_id = hdr->hdr.l2.vlan_id;
2490 skb_pull(skb, VLAN_HLEN);
2491 }
2492#endif
2493 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2494 return vlan_id;
2495} 2421}
2496 2422
2497static inline __u16 2423static inline __u16
@@ -2560,7 +2486,6 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2560 int offset; 2486 int offset;
2561 int rxrc; 2487 int rxrc;
2562 __u16 vlan_tag = 0; 2488 __u16 vlan_tag = 0;
2563 __u16 *vlan_addr;
2564 2489
2565 /* get first element of current buffer */ 2490 /* get first element of current buffer */
2566 element = (struct qdio_buffer_element *)&buf->buffer->element[0]; 2491 element = (struct qdio_buffer_element *)&buf->buffer->element[0];
@@ -2571,7 +2496,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2571 &offset, &hdr))) { 2496 &offset, &hdr))) {
2572 skb->dev = card->dev; 2497 skb->dev = card->dev;
2573 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) 2498 if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
2574 vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); 2499 qeth_layer2_rebuild_skb(card, skb, hdr);
2575 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) 2500 else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
2576 vlan_tag = qeth_rebuild_skb(card, skb, hdr); 2501 vlan_tag = qeth_rebuild_skb(card, skb, hdr);
2577 else { /*in case of OSN*/ 2502 else { /*in case of OSN*/
@@ -3968,13 +3893,22 @@ static inline struct sk_buff *
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
 		struct qeth_hdr **hdr, int ipv)
 {
-	struct sk_buff *new_skb;
+	struct sk_buff *new_skb, *new_skb2;
 
 	QETH_DBF_TEXT(trace, 6, "prepskb");
-
-	new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
-	if (new_skb == NULL)
+	new_skb = skb;
+	new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
+	if (!new_skb)
+		return NULL;
+	new_skb2 = qeth_realloc_headroom(card, new_skb,
+			sizeof(struct qeth_hdr));
+	if (!new_skb2) {
+		__qeth_free_new_skb(skb, new_skb);
 		return NULL;
+	}
+	if (new_skb != skb)
+		__qeth_free_new_skb(new_skb2, new_skb);
+	new_skb = new_skb2;
 	*hdr = __qeth_prepare_skb(card, new_skb, ipv);
 	if (*hdr == NULL) {
 		__qeth_free_new_skb(skb, new_skb);
@@ -4844,9 +4778,11 @@ qeth_arp_query(struct qeth_card *card, char __user *udata)
4844 "(0x%x/%d)\n", 4778 "(0x%x/%d)\n",
4845 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), 4779 QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
4846 tmp, tmp); 4780 tmp, tmp);
4847 copy_to_user(udata, qinfo.udata, 4); 4781 if (copy_to_user(udata, qinfo.udata, 4))
4782 rc = -EFAULT;
4848 } else { 4783 } else {
4849 copy_to_user(udata, qinfo.udata, qinfo.udata_len); 4784 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4785 rc = -EFAULT;
4850 } 4786 }
4851 kfree(qinfo.udata); 4787 kfree(qinfo.udata);
4852 return rc; 4788 return rc;
@@ -4992,8 +4928,10 @@ qeth_snmp_command(struct qeth_card *card, char __user *udata)
4992 if (rc) 4928 if (rc)
4993 PRINT_WARN("SNMP command failed on %s: (0x%x)\n", 4929 PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
4994 QETH_CARD_IFNAME(card), rc); 4930 QETH_CARD_IFNAME(card), rc);
4995 else 4931 else {
4996 copy_to_user(udata, qinfo.udata, qinfo.udata_len); 4932 if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4933 rc = -EFAULT;
4934 }
4997 4935
4998 kfree(ureq); 4936 kfree(ureq);
4999 kfree(qinfo.udata); 4937 kfree(qinfo.udata);
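Both hunks above apply the same rule: copy_to_user() returns the number of bytes it could not copy, so any nonzero result maps to -EFAULT rather than being passed along as a count. In isolation:

	#include <linux/uaccess.h>	/* asm/uaccess.h on older trees */
	#include <linux/errno.h>

	static int demo_copy_out(void __user *dst, const void *src, size_t len)
	{
		if (copy_to_user(dst, src, len))
			return -EFAULT;
		return 0;
	}
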
@@ -5544,12 +5482,10 @@ qeth_set_multicast_list(struct net_device *dev)
5544 qeth_add_multicast_ipv6(card); 5482 qeth_add_multicast_ipv6(card);
5545#endif 5483#endif
5546out: 5484out:
5547 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 5485 qeth_set_ip_addr_list(card);
5548 schedule_work(&card->kernel_thread_starter);
5549 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 5486 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
5550 return; 5487 return;
5551 if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0) 5488 qeth_setadp_promisc_mode(card);
5552 schedule_work(&card->kernel_thread_starter);
5553} 5489}
5554 5490
5555static int 5491static int
@@ -6351,6 +6287,42 @@ static struct ethtool_ops qeth_ethtool_ops = {
6351}; 6287};
6352 6288
6353static int 6289static int
6290qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
6291{
6292 struct qeth_card *card;
6293 struct ethhdr *eth;
6294
6295 card = qeth_get_card_from_dev(skb->dev);
6296 if (card->options.layer2)
6297 goto haveheader;
6298#ifdef CONFIG_QETH_IPV6
6299 /* cause of the manipulated arp constructor and the ARP
6300 flag for OSAE devices we have some nasty exceptions */
6301 if (card->info.type == QETH_CARD_TYPE_OSAE) {
6302 if (!card->options.fake_ll) {
6303 if ((skb->pkt_type==PACKET_OUTGOING) &&
6304 (skb->protocol==ETH_P_IPV6))
6305 goto haveheader;
6306 else
6307 return 0;
6308 } else {
6309 if ((skb->pkt_type==PACKET_OUTGOING) &&
6310 (skb->protocol==ETH_P_IP))
6311 return 0;
6312 else
6313 goto haveheader;
6314 }
6315 }
6316#endif
6317 if (!card->options.fake_ll)
6318 return 0;
6319haveheader:
6320 eth = eth_hdr(skb);
6321 memcpy(haddr, eth->h_source, ETH_ALEN);
6322 return ETH_ALEN;
6323}
6324
6325static int
6354qeth_netdev_init(struct net_device *dev) 6326qeth_netdev_init(struct net_device *dev)
6355{ 6327{
6356 struct qeth_card *card; 6328 struct qeth_card *card;
@@ -6388,7 +6360,10 @@ qeth_netdev_init(struct net_device *dev)
6388 if (card->options.fake_ll && 6360 if (card->options.fake_ll &&
6389 (qeth_get_netdev_flags(card) & IFF_NOARP)) 6361 (qeth_get_netdev_flags(card) & IFF_NOARP))
6390 dev->hard_header = qeth_fake_header; 6362 dev->hard_header = qeth_fake_header;
6391 dev->hard_header_parse = NULL; 6363 if (dev->type == ARPHRD_IEEE802_TR)
6364 dev->hard_header_parse = NULL;
6365 else
6366 dev->hard_header_parse = qeth_hard_header_parse;
6392 dev->set_mac_address = qeth_layer2_set_mac_address; 6367 dev->set_mac_address = qeth_layer2_set_mac_address;
6393 dev->flags |= qeth_get_netdev_flags(card); 6368 dev->flags |= qeth_get_netdev_flags(card);
6394 if ((card->options.fake_broadcast) || 6369 if ((card->options.fake_broadcast) ||
@@ -8235,8 +8210,7 @@ qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8235 } 8210 }
8236 if (!qeth_add_ip(card, ipaddr)) 8211 if (!qeth_add_ip(card, ipaddr))
8237 kfree(ipaddr); 8212 kfree(ipaddr);
8238 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8213 qeth_set_ip_addr_list(card);
8239 schedule_work(&card->kernel_thread_starter);
8240 return rc; 8214 return rc;
8241} 8215}
8242 8216
@@ -8264,8 +8238,7 @@ qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
8264 return; 8238 return;
8265 if (!qeth_delete_ip(card, ipaddr)) 8239 if (!qeth_delete_ip(card, ipaddr))
8266 kfree(ipaddr); 8240 kfree(ipaddr);
8267 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8241 qeth_set_ip_addr_list(card);
8268 schedule_work(&card->kernel_thread_starter);
8269} 8242}
8270 8243
8271/* 8244/*
@@ -8308,8 +8281,7 @@ qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8308 } 8281 }
8309 if (!qeth_add_ip(card, ipaddr)) 8282 if (!qeth_add_ip(card, ipaddr))
8310 kfree(ipaddr); 8283 kfree(ipaddr);
8311 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8284 qeth_set_ip_addr_list(card);
8312 schedule_work(&card->kernel_thread_starter);
8313 return 0; 8285 return 0;
8314} 8286}
8315 8287
@@ -8337,8 +8309,7 @@ qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
8337 return; 8309 return;
8338 if (!qeth_delete_ip(card, ipaddr)) 8310 if (!qeth_delete_ip(card, ipaddr))
8339 kfree(ipaddr); 8311 kfree(ipaddr);
8340 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8312 qeth_set_ip_addr_list(card);
8341 schedule_work(&card->kernel_thread_starter);
8342} 8313}
8343 8314
8344/** 8315/**
@@ -8380,8 +8351,7 @@ qeth_ip_event(struct notifier_block *this,
8380 default: 8351 default:
8381 break; 8352 break;
8382 } 8353 }
8383 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8354 qeth_set_ip_addr_list(card);
8384 schedule_work(&card->kernel_thread_starter);
8385out: 8355out:
8386 return NOTIFY_DONE; 8356 return NOTIFY_DONE;
8387} 8357}
@@ -8433,8 +8403,7 @@ qeth_ip6_event(struct notifier_block *this,
8433 default: 8403 default:
8434 break; 8404 break;
8435 } 8405 }
8436 if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) 8406 qeth_set_ip_addr_list(card);
8437 schedule_work(&card->kernel_thread_starter);
8438out: 8407out:
8439 return NOTIFY_DONE; 8408 return NOTIFY_DONE;
8440} 8409}
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index d72df5dae4ee..e16fe361436e 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1629,7 +1629,6 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1629 /* General socket configuration */ 1629 /* General socket configuration */
1630 link->conf.Attributes = CONF_ENABLE_IRQ; 1630 link->conf.Attributes = CONF_ENABLE_IRQ;
1631 link->conf.IntType = INT_MEMORY_AND_IO; 1631 link->conf.IntType = INT_MEMORY_AND_IO;
1632 link->conf.Present = PRESENT_OPTION;
1633 1632
1634 ret = nsp_cs_config(link); 1633 ret = nsp_cs_config(link);
1635 1634
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index fb7acea60286..9fb0ea5c1fb9 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -895,7 +895,6 @@ SYM53C500_probe(struct pcmcia_device *link)
895 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 895 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
896 link->conf.Attributes = CONF_ENABLE_IRQ; 896 link->conf.Attributes = CONF_ENABLE_IRQ;
897 link->conf.IntType = INT_MEMORY_AND_IO; 897 link->conf.IntType = INT_MEMORY_AND_IO;
898 link->conf.Present = PRESENT_OPTION;
899 898
900 return SYM53C500_config(link); 899 return SYM53C500_config(link);
901} /* SYM53C500_attach */ 900} /* SYM53C500_attach */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 51f3c739f7e1..5261f0af8b10 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2296,7 +2296,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
2296 local_irq_restore(flags); 2296 local_irq_restore(flags);
2297} 2297}
2298 2298
2299static int serial8250_console_setup(struct console *co, char *options) 2299static int __init serial8250_console_setup(struct console *co, char *options)
2300{ 2300{
2301 struct uart_port *port; 2301 struct uart_port *port;
2302 int baud = 9600; 2302 int baud = 9600;
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 9d11a75663e6..3c4b6c243712 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -789,7 +789,9 @@ static struct console mpc52xx_console = {
789static int __init 789static int __init
790mpc52xx_console_init(void) 790mpc52xx_console_init(void)
791{ 791{
792#if defined(CONFIG_PPC_MERGE)
792 mpc52xx_uart_of_enumerate(); 793 mpc52xx_uart_of_enumerate();
794#endif
793 register_console(&mpc52xx_console); 795 register_console(&mpc52xx_console);
794 return 0; 796 return 0;
795} 797}
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 24ee8be359f5..6377db1b446d 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -217,6 +217,7 @@ static const struct quirk_printer_struct quirk_printers[] = {
217 { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */ 217 { 0x0409, 0xbef4, USBLP_QUIRK_BIDIR }, /* NEC Picty760 (HP OEM) */
218 { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */ 218 { 0x0409, 0xf0be, USBLP_QUIRK_BIDIR }, /* NEC Picty920 (HP OEM) */
219 { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */ 219 { 0x0409, 0xf1be, USBLP_QUIRK_BIDIR }, /* NEC Picty800 (HP OEM) */
220 { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
220 { 0, 0 } 221 { 0, 0 }
221}; 222};
222 223
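For context, a sentinel-terminated table like quirk_printers is typically scanned once at probe time; a sketch of such a lookup, assuming field names vendorId/productId/quirks for the struct initialized in the hunk (the helper name is illustrative):

	static unsigned int demo_quirks(__u16 vendor, __u16 product)
	{
		int i;

		for (i = 0; quirk_printers[i].vendorId; i++)
			if (quirk_printers[i].vendorId == vendor &&
			    quirk_printers[i].productId == product)
				return quirk_printers[i].quirks;
		return 0;
	}
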
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index c505b767cee1..5e628ae3aec7 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -268,6 +268,7 @@ static void ep_device_release(struct device *dev)
268 struct ep_device *ep_dev = to_ep_device(dev); 268 struct ep_device *ep_dev = to_ep_device(dev);
269 269
270 dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id); 270 dev_dbg(dev, "%s called for %s\n", __FUNCTION__, dev->bus_id);
271 endpoint_free_minor(ep_dev);
271 kfree(ep_dev); 272 kfree(ep_dev);
272} 273}
273 274
@@ -349,7 +350,6 @@ void usb_remove_ep_files(struct usb_host_endpoint *endpoint)
349 sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress); 350 sprintf(name, "ep_%02x", endpoint->desc.bEndpointAddress);
350 sysfs_remove_link(&ep_dev->dev.parent->kobj, name); 351 sysfs_remove_link(&ep_dev->dev.parent->kobj, name);
351 sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp); 352 sysfs_remove_group(&ep_dev->dev.kobj, &ep_dev_attr_grp);
352 endpoint_free_minor(ep_dev);
353 device_unregister(&ep_dev->dev); 353 device_unregister(&ep_dev->dev);
354 endpoint->ep_dev = NULL; 354 endpoint->ep_dev = NULL;
355 destroy_endpoint_class(); 355 destroy_endpoint_class();
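The two hunks move the minor-number release from the unregister path into the struct device ->release callback, i.e. to the point where the last reference is dropped. The general shape of that rule, with illustrative names:

	static void demo_release(struct device *dev)
	{
		struct demo_ep *ep = container_of(dev, struct demo_ep, dev);

		demo_free_minor(ep);	/* safe: no user can reach it anymore */
		kfree(ep);
	}
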
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 15d77c307930..cdcfd42843d4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -42,6 +42,7 @@
42#include <linux/usb_gadget.h> 42#include <linux/usb_gadget.h>
43#include <linux/usb/otg.h> 43#include <linux/usb/otg.h>
44#include <linux/dma-mapping.h> 44#include <linux/dma-mapping.h>
45#include <linux/clk.h>
45 46
46#include <asm/byteorder.h> 47#include <asm/byteorder.h>
47#include <asm/io.h> 48#include <asm/io.h>
@@ -60,6 +61,11 @@
60/* bulk DMA seems to be behaving for both IN and OUT */ 61/* bulk DMA seems to be behaving for both IN and OUT */
61#define USE_DMA 62#define USE_DMA
62 63
64/* FIXME: OMAP2 currently has some problem in DMA mode */
65#ifdef CONFIG_ARCH_OMAP2
66#undef USE_DMA
67#endif
68
63/* ISO too */ 69/* ISO too */
64#define USE_ISO 70#define USE_ISO
65 71
@@ -99,7 +105,7 @@ static unsigned fifo_mode = 0;
99 * boot parameter "omap_udc:fifo_mode=42" 105 * boot parameter "omap_udc:fifo_mode=42"
100 */ 106 */
101module_param (fifo_mode, uint, 0); 107module_param (fifo_mode, uint, 0);
102MODULE_PARM_DESC (fifo_mode, "endpoint setup (0 == default)"); 108MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
103 109
104#ifdef USE_DMA 110#ifdef USE_DMA
105static unsigned use_dma = 1; 111static unsigned use_dma = 1;
@@ -122,7 +128,7 @@ static const char driver_desc [] = DRIVER_DESC;
122/*-------------------------------------------------------------------------*/ 128/*-------------------------------------------------------------------------*/
123 129
124/* there's a notion of "current endpoint" for modifying endpoint 130/* there's a notion of "current endpoint" for modifying endpoint
125 * state, and PIO access to its FIFO. 131 * state, and PIO access to its FIFO.
126 */ 132 */
127 133
128static void use_ep(struct omap_ep *ep, u16 select) 134static void use_ep(struct omap_ep *ep, u16 select)
@@ -391,7 +397,7 @@ done(struct omap_ep *ep, struct omap_req *req, int status)
391#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY) 397#define FIFO_EMPTY (UDC_NON_ISO_FIFO_EMPTY | UDC_ISO_FIFO_EMPTY)
392#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY) 398#define FIFO_UNREADABLE (UDC_EP_HALTED | FIFO_EMPTY)
393 399
394static inline int 400static inline int
395write_packet(u8 *buf, struct omap_req *req, unsigned max) 401write_packet(u8 *buf, struct omap_req *req, unsigned max)
396{ 402{
397 unsigned len; 403 unsigned len;
@@ -456,7 +462,7 @@ static int write_fifo(struct omap_ep *ep, struct omap_req *req)
456 return is_last; 462 return is_last;
457} 463}
458 464
459static inline int 465static inline int
460read_packet(u8 *buf, struct omap_req *req, unsigned avail) 466read_packet(u8 *buf, struct omap_req *req, unsigned avail)
461{ 467{
462 unsigned len; 468 unsigned len;
@@ -542,9 +548,9 @@ static inline dma_addr_t dma_csac(unsigned lch)
542 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 548 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
543 * read before the DMA controller finished disabling the channel. 549 * read before the DMA controller finished disabling the channel.
544 */ 550 */
545 csac = omap_readw(OMAP_DMA_CSAC(lch)); 551 csac = OMAP_DMA_CSAC_REG(lch);
546 if (csac == 0) 552 if (csac == 0)
547 csac = omap_readw(OMAP_DMA_CSAC(lch)); 553 csac = OMAP_DMA_CSAC_REG(lch);
548 return csac; 554 return csac;
549} 555}
550 556
@@ -555,9 +561,9 @@ static inline dma_addr_t dma_cdac(unsigned lch)
555 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is 561 /* omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
556 * read before the DMA controller finished disabling the channel. 562 * read before the DMA controller finished disabling the channel.
557 */ 563 */
558 cdac = omap_readw(OMAP_DMA_CDAC(lch)); 564 cdac = OMAP_DMA_CDAC_REG(lch);
559 if (cdac == 0) 565 if (cdac == 0)
560 cdac = omap_readw(OMAP_DMA_CDAC(lch)); 566 cdac = OMAP_DMA_CDAC_REG(lch);
561 return cdac; 567 return cdac;
562} 568}
563 569
@@ -582,7 +588,7 @@ static u16 dma_src_len(struct omap_ep *ep, dma_addr_t start)
582} 588}
583 589
584#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \ 590#define DMA_DEST_LAST(x) (cpu_is_omap15xx() \
585 ? omap_readw(OMAP_DMA_CSAC(x)) /* really: CPC */ \ 591 ? OMAP_DMA_CSAC_REG(x) /* really: CPC */ \
586 : dma_cdac(x)) 592 : dma_cdac(x))
587 593
588static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start) 594static u16 dma_dest_len(struct omap_ep *ep, dma_addr_t start)
@@ -620,17 +626,19 @@ static void next_in_dma(struct omap_ep *ep, struct omap_req *req)
620 || (cpu_is_omap15xx() && length < ep->maxpacket)) { 626 || (cpu_is_omap15xx() && length < ep->maxpacket)) {
621 txdma_ctrl = UDC_TXN_EOT | length; 627 txdma_ctrl = UDC_TXN_EOT | length;
622 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8, 628 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S8,
623 length, 1, sync_mode); 629 length, 1, sync_mode, 0, 0);
624 } else { 630 } else {
625 length = min(length / ep->maxpacket, 631 length = min(length / ep->maxpacket,
626 (unsigned) UDC_TXN_TSC + 1); 632 (unsigned) UDC_TXN_TSC + 1);
627 txdma_ctrl = length; 633 txdma_ctrl = length;
628 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, 634 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
629 ep->ep.maxpacket >> 1, length, sync_mode); 635 ep->ep.maxpacket >> 1, length, sync_mode,
636 0, 0);
630 length *= ep->maxpacket; 637 length *= ep->maxpacket;
631 } 638 }
632 omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF, 639 omap_set_dma_src_params(ep->lch, OMAP_DMA_PORT_EMIFF,
633 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual); 640 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
641 0, 0);
634 642
635 omap_start_dma(ep->lch); 643 omap_start_dma(ep->lch);
636 ep->dma_counter = dma_csac(ep->lch); 644 ep->dma_counter = dma_csac(ep->lch);
@@ -675,9 +683,11 @@ static void next_out_dma(struct omap_ep *ep, struct omap_req *req)
675 req->dma_bytes = packets * ep->ep.maxpacket; 683 req->dma_bytes = packets * ep->ep.maxpacket;
676 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16, 684 omap_set_dma_transfer_params(ep->lch, OMAP_DMA_DATA_TYPE_S16,
677 ep->ep.maxpacket >> 1, packets, 685 ep->ep.maxpacket >> 1, packets,
678 OMAP_DMA_SYNC_ELEMENT); 686 OMAP_DMA_SYNC_ELEMENT,
687 0, 0);
679 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF, 688 omap_set_dma_dest_params(ep->lch, OMAP_DMA_PORT_EMIFF,
680 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual); 689 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual,
690 0, 0);
681 ep->dma_counter = DMA_DEST_LAST(ep->lch); 691 ep->dma_counter = DMA_DEST_LAST(ep->lch);
682 692
683 UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1); 693 UDC_RXDMA_REG(ep->dma_channel) = UDC_RXN_STOP | (packets - 1);
@@ -820,7 +830,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
820 omap_set_dma_dest_params(ep->lch, 830 omap_set_dma_dest_params(ep->lch,
821 OMAP_DMA_PORT_TIPB, 831 OMAP_DMA_PORT_TIPB,
822 OMAP_DMA_AMODE_CONSTANT, 832 OMAP_DMA_AMODE_CONSTANT,
823 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG)); 833 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG),
834 0, 0);
824 } 835 }
825 } else { 836 } else {
826 status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel, 837 status = omap_request_dma(OMAP_DMA_USB_W2FC_RX0 - 1 + channel,
@@ -831,7 +842,8 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
831 omap_set_dma_src_params(ep->lch, 842 omap_set_dma_src_params(ep->lch,
832 OMAP_DMA_PORT_TIPB, 843 OMAP_DMA_PORT_TIPB,
833 OMAP_DMA_AMODE_CONSTANT, 844 OMAP_DMA_AMODE_CONSTANT,
834 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG)); 845 (unsigned long) io_v2p((u32)&UDC_DATA_DMA_REG),
846 0, 0);
835 /* EMIFF */ 847 /* EMIFF */
836 omap_set_dma_dest_burst_mode(ep->lch, 848 omap_set_dma_dest_burst_mode(ep->lch,
837 OMAP_DMA_DATA_BURST_4); 849 OMAP_DMA_DATA_BURST_4);
@@ -846,7 +858,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel)
846 858
847 /* channel type P: hw synch (fifo) */ 859 /* channel type P: hw synch (fifo) */
848 if (!cpu_is_omap15xx()) 860 if (!cpu_is_omap15xx())
849 omap_writew(2, OMAP_DMA_LCH_CTRL(ep->lch)); 861 OMAP1_DMA_LCH_CTRL_REG(ep->lch) = 2;
850 } 862 }
851 863
852just_restart: 864just_restart:
@@ -893,7 +905,7 @@ static void dma_channel_release(struct omap_ep *ep)
893 else 905 else
894 req = NULL; 906 req = NULL;
895 907
896 active = ((1 << 7) & omap_readl(OMAP_DMA_CCR(ep->lch))) != 0; 908 active = ((1 << 7) & OMAP_DMA_CCR_REG(ep->lch)) != 0;
897 909
898 DBG("%s release %s %cxdma%d %p\n", ep->ep.name, 910 DBG("%s release %s %cxdma%d %p\n", ep->ep.name,
899 active ? "active" : "idle", 911 active ? "active" : "idle",
@@ -1117,7 +1129,7 @@ static int omap_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1117 */ 1129 */
1118 dma_channel_release(ep); 1130 dma_channel_release(ep);
1119 dma_channel_claim(ep, channel); 1131 dma_channel_claim(ep, channel);
1120 } else 1132 } else
1121 done(ep, req, -ECONNRESET); 1133 done(ep, req, -ECONNRESET);
1122 spin_unlock_irqrestore(&ep->udc->lock, flags); 1134 spin_unlock_irqrestore(&ep->udc->lock, flags);
1123 return 0; 1135 return 0;
@@ -1153,7 +1165,7 @@ static int omap_ep_set_halt(struct usb_ep *_ep, int value)
1153 1165
1154 /* IN endpoints must already be idle */ 1166 /* IN endpoints must already be idle */
1155 if ((ep->bEndpointAddress & USB_DIR_IN) 1167 if ((ep->bEndpointAddress & USB_DIR_IN)
1156 && !list_empty(&ep->queue)) { 1168 && !list_empty(&ep->queue)) {
1157 status = -EAGAIN; 1169 status = -EAGAIN;
1158 goto done; 1170 goto done;
1159 } 1171 }
@@ -1298,6 +1310,23 @@ static void pullup_disable(struct omap_udc *udc)
1298 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN; 1310 UDC_SYSCON1_REG &= ~UDC_PULLUP_EN;
1299} 1311}
1300 1312
1313static struct omap_udc *udc;
1314
1315static void omap_udc_enable_clock(int enable)
1316{
1317 if (udc == NULL || udc->dc_clk == NULL || udc->hhc_clk == NULL)
1318 return;
1319
1320 if (enable) {
1321 clk_enable(udc->dc_clk);
1322 clk_enable(udc->hhc_clk);
1323 udelay(100);
1324 } else {
1325 clk_disable(udc->hhc_clk);
1326 clk_disable(udc->dc_clk);
1327 }
1328}
1329
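omap_udc_enable_clock() pairs clk_enable()/clk_disable() on handles the probe code obtains with clk_get(); a compressed sketch of that lifecycle (clock name as used later in probe, error handling trimmed):

	#include <linux/clk.h>
	#include <linux/delay.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int demo_clk_on(struct platform_device *pdev, struct clk **clkp)
	{
		struct clk *clk = clk_get(&pdev->dev, "usb_dc_ck");

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		clk_enable(clk);	/* balanced later by clk_disable() */
		udelay(100);		/* settle time, mirroring the driver */
		*clkp = clk;
		return 0;
	}
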
1301/* 1330/*
1302 * Called by whatever detects VBUS sessions: external transceiver 1331 * Called by whatever detects VBUS sessions: external transceiver
1303 * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock. 1332 * driver, or maybe GPIO0 VBUS IRQ. May request 48 MHz clock.
@@ -1318,10 +1347,22 @@ static int omap_vbus_session(struct usb_gadget *gadget, int is_active)
1318 else 1347 else
1319 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510; 1348 FUNC_MUX_CTRL_0_REG &= ~VBUS_CTRL_1510;
1320 } 1349 }
1350 if (udc->dc_clk != NULL && is_active) {
1351 if (!udc->clk_requested) {
1352 omap_udc_enable_clock(1);
1353 udc->clk_requested = 1;
1354 }
1355 }
1321 if (can_pullup(udc)) 1356 if (can_pullup(udc))
1322 pullup_enable(udc); 1357 pullup_enable(udc);
1323 else 1358 else
1324 pullup_disable(udc); 1359 pullup_disable(udc);
1360 if (udc->dc_clk != NULL && !is_active) {
1361 if (udc->clk_requested) {
1362 omap_udc_enable_clock(0);
1363 udc->clk_requested = 0;
1364 }
1365 }
1325 spin_unlock_irqrestore(&udc->lock, flags); 1366 spin_unlock_irqrestore(&udc->lock, flags);
1326 return 0; 1367 return 0;
1327} 1368}
@@ -1441,7 +1482,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1441 } 1482 }
1442 } 1483 }
1443 1484
1444 /* IN/OUT packets mean we're in the DATA or STATUS stage. 1485 /* IN/OUT packets mean we're in the DATA or STATUS stage.
1445 * This driver only uses protocol stalls (ep0 never halts), 1486
1446 * and if we got this far the gadget driver already had a 1487 * and if we got this far the gadget driver already had a
1447 * chance to stall. Tries to be forgiving of host oddities. 1488 * chance to stall. Tries to be forgiving of host oddities.
@@ -1509,7 +1550,7 @@ static void ep0_irq(struct omap_udc *udc, u16 irq_src)
1509 } else if (stat == 0) 1550 } else if (stat == 0)
1510 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1551 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1511 UDC_EP_NUM_REG = 0; 1552 UDC_EP_NUM_REG = 0;
1512 1553
1513 /* activate status stage */ 1554 /* activate status stage */
1514 if (stat == 1) { 1555 if (stat == 1) {
1515 done(ep0, req, 0); 1556 done(ep0, req, 0);
@@ -1866,7 +1907,7 @@ static void pio_out_timer(unsigned long _ep)
1866 1907
1867 spin_lock_irqsave(&ep->udc->lock, flags); 1908 spin_lock_irqsave(&ep->udc->lock, flags);
1868 if (!list_empty(&ep->queue) && ep->ackwait) { 1909 if (!list_empty(&ep->queue) && ep->ackwait) {
1869 use_ep(ep, 0); 1910 use_ep(ep, UDC_EP_SEL);
1870 stat_flg = UDC_STAT_FLG_REG; 1911 stat_flg = UDC_STAT_FLG_REG;
1871 1912
1872 if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN) 1913 if ((stat_flg & UDC_ACK) && (!(stat_flg & UDC_FIFO_EN)
@@ -1876,12 +1917,12 @@ static void pio_out_timer(unsigned long _ep)
1876 VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg); 1917 VDBG("%s: lose, %04x\n", ep->ep.name, stat_flg);
1877 req = container_of(ep->queue.next, 1918 req = container_of(ep->queue.next,
1878 struct omap_req, queue); 1919 struct omap_req, queue);
1879 UDC_EP_NUM_REG = ep->bEndpointAddress | UDC_EP_SEL;
1880 (void) read_fifo(ep, req); 1920 (void) read_fifo(ep, req);
1881 UDC_EP_NUM_REG = ep->bEndpointAddress; 1921 UDC_EP_NUM_REG = ep->bEndpointAddress;
1882 UDC_CTRL_REG = UDC_SET_FIFO_EN; 1922 UDC_CTRL_REG = UDC_SET_FIFO_EN;
1883 ep->ackwait = 1 + ep->double_buf; 1923 ep->ackwait = 1 + ep->double_buf;
1884 } 1924 } else
1925 deselect_ep();
1885 } 1926 }
1886 mod_timer(&ep->timer, PIO_OUT_TIMEOUT); 1927 mod_timer(&ep->timer, PIO_OUT_TIMEOUT);
1887 spin_unlock_irqrestore(&ep->udc->lock, flags); 1928 spin_unlock_irqrestore(&ep->udc->lock, flags);
@@ -2028,7 +2069,17 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
2028 2069
2029/*-------------------------------------------------------------------------*/ 2070/*-------------------------------------------------------------------------*/
2030 2071
2031static struct omap_udc *udc; 2072static inline int machine_needs_vbus_session(void)
2073{
2074 return (machine_is_omap_innovator()
2075 || machine_is_omap_osk()
2076 || machine_is_omap_apollon()
2077#ifndef CONFIG_MACH_OMAP_H4_OTG
2078 || machine_is_omap_h4()
2079#endif
2080 || machine_is_sx1()
2081 );
2082}
2032 2083
2033int usb_gadget_register_driver (struct usb_gadget_driver *driver) 2084int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2034{ 2085{
@@ -2070,6 +2121,9 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2070 udc->gadget.dev.driver = &driver->driver; 2121 udc->gadget.dev.driver = &driver->driver;
2071 spin_unlock_irqrestore(&udc->lock, flags); 2122 spin_unlock_irqrestore(&udc->lock, flags);
2072 2123
2124 if (udc->dc_clk != NULL)
2125 omap_udc_enable_clock(1);
2126
2073 status = driver->bind (&udc->gadget); 2127 status = driver->bind (&udc->gadget);
2074 if (status) { 2128 if (status) {
2075 DBG("bind to %s --> %d\n", driver->driver.name, status); 2129 DBG("bind to %s --> %d\n", driver->driver.name, status);
@@ -2103,10 +2157,12 @@ int usb_gadget_register_driver (struct usb_gadget_driver *driver)
2103 /* boards that don't have VBUS sensing can't autogate 48MHz; 2157 /* boards that don't have VBUS sensing can't autogate 48MHz;
2104 * can't enter deep sleep while a gadget driver is active. 2158 * can't enter deep sleep while a gadget driver is active.
2105 */ 2159 */
2106 if (machine_is_omap_innovator() || machine_is_omap_osk()) 2160 if (machine_needs_vbus_session())
2107 omap_vbus_session(&udc->gadget, 1); 2161 omap_vbus_session(&udc->gadget, 1);
2108 2162
2109done: 2163done:
2164 if (udc->dc_clk != NULL)
2165 omap_udc_enable_clock(0);
2110 return status; 2166 return status;
2111} 2167}
2112EXPORT_SYMBOL(usb_gadget_register_driver); 2168EXPORT_SYMBOL(usb_gadget_register_driver);
@@ -2121,7 +2177,10 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2121 if (!driver || driver != udc->driver || !driver->unbind) 2177 if (!driver || driver != udc->driver || !driver->unbind)
2122 return -EINVAL; 2178 return -EINVAL;
2123 2179
2124 if (machine_is_omap_innovator() || machine_is_omap_osk()) 2180 if (udc->dc_clk != NULL)
2181 omap_udc_enable_clock(1);
2182
2183 if (machine_needs_vbus_session())
2125 omap_vbus_session(&udc->gadget, 0); 2184 omap_vbus_session(&udc->gadget, 0);
2126 2185
2127 if (udc->transceiver) 2186 if (udc->transceiver)
@@ -2137,6 +2196,8 @@ int usb_gadget_unregister_driver (struct usb_gadget_driver *driver)
2137 udc->gadget.dev.driver = NULL; 2196 udc->gadget.dev.driver = NULL;
2138 udc->driver = NULL; 2197 udc->driver = NULL;
2139 2198
2199 if (udc->dc_clk != NULL)
2200 omap_udc_enable_clock(0);
2140 DBG("unregistered driver '%s'\n", driver->driver.name); 2201 DBG("unregistered driver '%s'\n", driver->driver.name);
2141 return status; 2202 return status;
2142} 2203}
@@ -2219,7 +2280,7 @@ static char *trx_mode(unsigned m, int enabled)
2219 case 0: return enabled ? "*6wire" : "unused"; 2280 case 0: return enabled ? "*6wire" : "unused";
2220 case 1: return "4wire"; 2281 case 1: return "4wire";
2221 case 2: return "3wire"; 2282 case 2: return "3wire";
2222 case 3: return "6wire"; 2283 case 3: return "6wire";
2223 default: return "unknown"; 2284 default: return "unknown";
2224 } 2285 }
2225} 2286}
@@ -2228,11 +2289,18 @@ static int proc_otg_show(struct seq_file *s)
2228{ 2289{
2229 u32 tmp; 2290 u32 tmp;
2230 u32 trans; 2291 u32 trans;
2292 char *ctrl_name;
2231 2293
2232 tmp = OTG_REV_REG; 2294 tmp = OTG_REV_REG;
2233 trans = USB_TRANSCEIVER_CTRL_REG; 2295 if (cpu_is_omap24xx()) {
2234 seq_printf(s, "\nOTG rev %d.%d, transceiver_ctrl %05x\n", 2296 ctrl_name = "control_devconf";
2235 tmp >> 4, tmp & 0xf, trans); 2297 trans = CONTROL_DEVCONF_REG;
2298 } else {
2299 ctrl_name = "tranceiver_ctrl";
2300 trans = USB_TRANSCEIVER_CTRL_REG;
2301 }
2302 seq_printf(s, "\nOTG rev %d.%d, %s %05x\n",
2303 tmp >> 4, tmp & 0xf, ctrl_name, trans);
2236 tmp = OTG_SYSCON_1_REG; 2304 tmp = OTG_SYSCON_1_REG;
2237 seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s," 2305 seq_printf(s, "otg_syscon1 %08x usb2 %s, usb1 %s, usb0 %s,"
2238 FOURBITS "\n", tmp, 2306 FOURBITS "\n", tmp,
@@ -2307,7 +2375,7 @@ static int proc_udc_show(struct seq_file *s, void *_)
2307 driver_desc, 2375 driver_desc,
2308 use_dma ? " (dma)" : ""); 2376 use_dma ? " (dma)" : "");
2309 2377
2310 tmp = UDC_REV_REG & 0xff; 2378 tmp = UDC_REV_REG & 0xff;
2311 seq_printf(s, 2379 seq_printf(s,
2312 "UDC rev %d.%d, fifo mode %d, gadget %s\n" 2380 "UDC rev %d.%d, fifo mode %d, gadget %s\n"
2313 "hmc %d, transceiver %s\n", 2381 "hmc %d, transceiver %s\n",
@@ -2315,11 +2383,16 @@ static int proc_udc_show(struct seq_file *s, void *_)
2315 fifo_mode, 2383 fifo_mode,
2316 udc->driver ? udc->driver->driver.name : "(none)", 2384 udc->driver ? udc->driver->driver.name : "(none)",
2317 HMC, 2385 HMC,
2318 udc->transceiver ? udc->transceiver->label : "(none)"); 2386 udc->transceiver
2319 seq_printf(s, "ULPD control %04x req %04x status %04x\n", 2387 ? udc->transceiver->label
2320 __REG16(ULPD_CLOCK_CTRL), 2388 : ((cpu_is_omap1710() || cpu_is_omap24xx())
2321 __REG16(ULPD_SOFT_REQ), 2389 ? "external" : "(none)"));
2322 __REG16(ULPD_STATUS_REQ)); 2390 if (cpu_class_is_omap1()) {
2391 seq_printf(s, "ULPD control %04x req %04x status %04x\n",
2392 __REG16(ULPD_CLOCK_CTRL),
2393 __REG16(ULPD_SOFT_REQ),
2394 __REG16(ULPD_STATUS_REQ));
2395 }
2323 2396
2324 /* OTG controller registers */ 2397 /* OTG controller registers */
2325 if (!cpu_is_omap15xx()) 2398 if (!cpu_is_omap15xx())
@@ -2504,9 +2577,10 @@ omap_ep_setup(char *name, u8 addr, u8 type,
2504 dbuf = 1; 2577 dbuf = 1;
2505 } else { 2578 } else {
2506 /* double-buffering "not supported" on 15xx, 2579 /* double-buffering "not supported" on 15xx,
2507 * and ignored for PIO-IN on 16xx 2580 * and ignored for PIO-IN on newer chips
2581 * (for more reliable behavior)
2508 */ 2582 */
2509 if (!use_dma || cpu_is_omap15xx()) 2583 if (!use_dma || cpu_is_omap15xx() || cpu_is_omap24xx())
2510 dbuf = 0; 2584 dbuf = 0;
2511 2585
2512 switch (maxp) { 2586 switch (maxp) {
@@ -2549,7 +2623,7 @@ omap_ep_setup(char *name, u8 addr, u8 type,
2549 ep->bEndpointAddress = addr; 2623 ep->bEndpointAddress = addr;
2550 ep->bmAttributes = type; 2624 ep->bmAttributes = type;
2551 ep->double_buf = dbuf; 2625 ep->double_buf = dbuf;
2552 ep->udc = udc; 2626 ep->udc = udc;
2553 2627
2554 ep->ep.name = ep->name; 2628 ep->ep.name = ep->name;
2555 ep->ep.ops = &omap_ep_ops; 2629 ep->ep.ops = &omap_ep_ops;
@@ -2709,15 +2783,37 @@ static int __init omap_udc_probe(struct platform_device *pdev)
2709 struct otg_transceiver *xceiv = NULL; 2783 struct otg_transceiver *xceiv = NULL;
2710 const char *type = NULL; 2784 const char *type = NULL;
2711 struct omap_usb_config *config = pdev->dev.platform_data; 2785 struct omap_usb_config *config = pdev->dev.platform_data;
2786 struct clk *dc_clk;
2787 struct clk *hhc_clk;
2712 2788
2713 /* NOTE: "knows" the order of the resources! */ 2789 /* NOTE: "knows" the order of the resources! */
2714 if (!request_mem_region(pdev->resource[0].start, 2790 if (!request_mem_region(pdev->resource[0].start,
2715 pdev->resource[0].end - pdev->resource[0].start + 1, 2791 pdev->resource[0].end - pdev->resource[0].start + 1,
2716 driver_name)) { 2792 driver_name)) {
2717 DBG("request_mem_region failed\n"); 2793 DBG("request_mem_region failed\n");
2718 return -EBUSY; 2794 return -EBUSY;
2719 } 2795 }
2720 2796
2797 if (cpu_is_omap16xx()) {
2798 dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
2799 hhc_clk = clk_get(&pdev->dev, "usb_hhc_ck");
2800 BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
2801 /* can't use omap_udc_enable_clock yet */
2802 clk_enable(dc_clk);
2803 clk_enable(hhc_clk);
2804 udelay(100);
2805 }
2806
2807 if (cpu_is_omap24xx()) {
2808 dc_clk = clk_get(&pdev->dev, "usb_fck");
2809 hhc_clk = clk_get(&pdev->dev, "usb_l4_ick");
2810 BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
2811 /* can't use omap_udc_enable_clock yet */
2812 clk_enable(dc_clk);
2813 clk_enable(hhc_clk);
2814 udelay(100);
2815 }
2816
2721 INFO("OMAP UDC rev %d.%d%s\n", 2817 INFO("OMAP UDC rev %d.%d%s\n",
2722 UDC_REV_REG >> 4, UDC_REV_REG & 0xf, 2818 UDC_REV_REG >> 4, UDC_REV_REG & 0xf,
2723 config->otg ? ", Mini-AB" : ""); 2819 config->otg ? ", Mini-AB" : "");
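
The 16xx and 24xx blocks above are identical except for the clock names. A hedged refactoring sketch; omap_udc_get_clocks() is hypothetical, but the names "usb_dc_ck"/"usb_hhc_ck" and "usb_fck"/"usb_l4_ick" and the early clk_enable() plus udelay(100) come straight from the hunk (returning an error here is an assumption; the patch itself uses BUG_ON):

static int omap_udc_get_clocks(struct platform_device *pdev,
		struct clk **dc, struct clk **hhc)
{
	const char *dc_name = cpu_is_omap24xx() ? "usb_fck" : "usb_dc_ck";
	const char *hhc_name = cpu_is_omap24xx() ? "usb_l4_ick" : "usb_hhc_ck";

	*dc = clk_get(&pdev->dev, dc_name);
	*hhc = clk_get(&pdev->dev, hhc_name);
	if (IS_ERR(*dc) || IS_ERR(*hhc))
		return -ENODEV;

	/* can't use omap_udc_enable_clock yet */
	clk_enable(*dc);
	clk_enable(*hhc);
	udelay(100);
	return 0;
}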
@@ -2727,7 +2823,7 @@ static int __init omap_udc_probe(struct platform_device *pdev)
2727 hmc = HMC_1510; 2823 hmc = HMC_1510;
2728 type = "(unknown)"; 2824 type = "(unknown)";
2729 2825
2730 if (machine_is_omap_innovator()) { 2826 if (machine_is_omap_innovator() || machine_is_sx1()) {
2731 /* just set up software VBUS detect, and then 2827 /* just set up software VBUS detect, and then
2732 * later rig it so we always report VBUS. 2828 * later rig it so we always report VBUS.
2733 * FIXME without really sensing VBUS, we can't 2829 * FIXME without really sensing VBUS, we can't
@@ -2756,6 +2852,15 @@ static int __init omap_udc_probe(struct platform_device *pdev)
2756 } 2852 }
2757 2853
2758 hmc = HMC_1610; 2854 hmc = HMC_1610;
2855
2856 if (cpu_is_omap24xx()) {
2857 /* this could be transceiverless in one of the
2858 * "we don't need to know" modes.
2859 */
2860 type = "external";
2861 goto known;
2862 }
2863
2759 switch (hmc) { 2864 switch (hmc) {
2760 case 0: /* POWERUP DEFAULT == 0 */ 2865 case 0: /* POWERUP DEFAULT == 0 */
2761 case 4: 2866 case 4:
@@ -2794,6 +2899,7 @@ bad_on_1710:
2794 goto cleanup0; 2899 goto cleanup0;
2795 } 2900 }
2796 } 2901 }
2902known:
2797 INFO("hmc mode %d, %s transceiver\n", hmc, type); 2903 INFO("hmc mode %d, %s transceiver\n", hmc, type);
2798 2904
2799 /* a "gadget" abstracts/virtualizes the controller */ 2905 /* a "gadget" abstracts/virtualizes the controller */
@@ -2818,8 +2924,8 @@ bad_on_1710:
2818 status = request_irq(pdev->resource[1].start, omap_udc_irq, 2924 status = request_irq(pdev->resource[1].start, omap_udc_irq,
2819 IRQF_SAMPLE_RANDOM, driver_name, udc); 2925 IRQF_SAMPLE_RANDOM, driver_name, udc);
2820 if (status != 0) { 2926 if (status != 0) {
2821 ERR( "can't get irq %ld, err %d\n", 2927 ERR("can't get irq %d, err %d\n",
2822 pdev->resource[1].start, status); 2928 (int) pdev->resource[1].start, status);
2823 goto cleanup1; 2929 goto cleanup1;
2824 } 2930 }
2825 2931
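
Both ERR() fixes align the conversion specifier with its argument: a resource start is not a plain long on every configuration, so passing it to "%ld" is undefined, and the patch prints it as an int with an explicit cast instead. A small userspace illustration of the same rule:

#include <stdio.h>

int main(void)
{
	unsigned int start = 42;	/* stand-in for pdev->resource[n].start */

	/* printf("irq %ld\n", start) would mismatch width/signedness;
	 * either cast the argument or match the specifier to the type */
	printf("irq %d\n", (int)start);
	printf("irq %u\n", start);
	return 0;
}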
@@ -2827,24 +2933,41 @@ bad_on_1710:
2827 status = request_irq(pdev->resource[2].start, omap_udc_pio_irq, 2933 status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
2828 IRQF_SAMPLE_RANDOM, "omap_udc pio", udc); 2934 IRQF_SAMPLE_RANDOM, "omap_udc pio", udc);
2829 if (status != 0) { 2935 if (status != 0) {
2830 ERR( "can't get irq %ld, err %d\n", 2936 ERR("can't get irq %d, err %d\n",
2831 pdev->resource[2].start, status); 2937 (int) pdev->resource[2].start, status);
2832 goto cleanup2; 2938 goto cleanup2;
2833 } 2939 }
2834#ifdef USE_ISO 2940#ifdef USE_ISO
2835 status = request_irq(pdev->resource[3].start, omap_udc_iso_irq, 2941 status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
2836 IRQF_DISABLED, "omap_udc iso", udc); 2942 IRQF_DISABLED, "omap_udc iso", udc);
2837 if (status != 0) { 2943 if (status != 0) {
2838 ERR("can't get irq %ld, err %d\n", 2944 ERR("can't get irq %d, err %d\n",
2839 pdev->resource[3].start, status); 2945 (int) pdev->resource[3].start, status);
2840 goto cleanup3; 2946 goto cleanup3;
2841 } 2947 }
2842#endif 2948#endif
2949 if (cpu_is_omap16xx()) {
2950 udc->dc_clk = dc_clk;
2951 udc->hhc_clk = hhc_clk;
2952 clk_disable(hhc_clk);
2953 clk_disable(dc_clk);
2954 }
2955
2956 if (cpu_is_omap24xx()) {
2957 udc->dc_clk = dc_clk;
2958 udc->hhc_clk = hhc_clk;
 2959		/* FIXME: on OMAP2 the hhc & dc clocks are not released yet */
2960#if 0
2961 clk_disable(hhc_clk);
2962 clk_disable(dc_clk);
2963#endif
2964 }
2843 2965
2844 create_proc_file(); 2966 create_proc_file();
2845 device_add(&udc->gadget.dev); 2967 status = device_add(&udc->gadget.dev);
2846 return 0; 2968 if (!status)
2847 2969 return status;
 2970	/* On failure, fall through to the cleanup ladder below */
2848#ifdef USE_ISO 2971#ifdef USE_ISO
2849cleanup3: 2972cleanup3:
2850 free_irq(pdev->resource[2].start, udc); 2973 free_irq(pdev->resource[2].start, udc);
@@ -2860,8 +2983,17 @@ cleanup1:
2860cleanup0: 2983cleanup0:
2861 if (xceiv) 2984 if (xceiv)
2862 put_device(xceiv->dev); 2985 put_device(xceiv->dev);
2986
2987 if (cpu_is_omap16xx() || cpu_is_omap24xx()) {
2988 clk_disable(hhc_clk);
2989 clk_disable(dc_clk);
2990 clk_put(hhc_clk);
2991 clk_put(dc_clk);
2992 }
2993
2863 release_mem_region(pdev->resource[0].start, 2994 release_mem_region(pdev->resource[0].start,
2864 pdev->resource[0].end - pdev->resource[0].start + 1); 2995 pdev->resource[0].end - pdev->resource[0].start + 1);
2996
2865 return status; 2997 return status;
2866} 2998}
2867 2999
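
device_add() can fail, and the probe now checks its status and falls through into the existing cleanup ladder instead of returning unconditionally. A self-contained userspace analogue of that unwind idiom (all names are stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int acquire_irq(void) { return 0; }	/* pretend this succeeds */
static int add_device(void) { return -1; }	/* pretend this fails */
static void release_irq(void) { puts("released irq"); }

static int probe(void)
{
	int status;

	status = acquire_irq();
	if (status != 0)
		goto fail_irq;

	status = add_device();
	if (status != 0)
		goto fail_add;	/* unwind everything acquired so far */
	return 0;

fail_add:
	release_irq();
fail_irq:
	return status;
}

int main(void)
{
	return probe() == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

Releases run in reverse order of acquisition, so each failure label only has to undo what was already set up.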
@@ -2891,6 +3023,13 @@ static int __exit omap_udc_remove(struct platform_device *pdev)
2891 free_irq(pdev->resource[2].start, udc); 3023 free_irq(pdev->resource[2].start, udc);
2892 free_irq(pdev->resource[1].start, udc); 3024 free_irq(pdev->resource[1].start, udc);
2893 3025
3026 if (udc->dc_clk) {
3027 if (udc->clk_requested)
3028 omap_udc_enable_clock(0);
3029 clk_put(udc->hhc_clk);
3030 clk_put(udc->dc_clk);
3031 }
3032
2894 release_mem_region(pdev->resource[0].start, 3033 release_mem_region(pdev->resource[0].start,
2895 pdev->resource[0].end - pdev->resource[0].start + 1); 3034 pdev->resource[0].end - pdev->resource[0].start + 1);
2896 3035
diff --git a/drivers/usb/gadget/omap_udc.h b/drivers/usb/gadget/omap_udc.h
index 652ee4627344..1dc398bb9ab2 100644
--- a/drivers/usb/gadget/omap_udc.h
+++ b/drivers/usb/gadget/omap_udc.h
@@ -175,6 +175,9 @@ struct omap_udc {
175 unsigned ep0_reset_config:1; 175 unsigned ep0_reset_config:1;
176 unsigned ep0_setup:1; 176 unsigned ep0_setup:1;
177 struct completion *done; 177 struct completion *done;
178 struct clk *dc_clk;
179 struct clk *hhc_clk;
180 unsigned clk_requested:1;
178}; 181};
179 182
180/*-------------------------------------------------------------------------*/ 183/*-------------------------------------------------------------------------*/
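
For reference, the three fields the header gains, annotated (the comments are inferred from how probe and remove use them, not part of the patch):

	struct clk	*dc_clk;	/* "usb_dc_ck" on 16xx, "usb_fck" on 24xx */
	struct clk	*hhc_clk;	/* "usb_hhc_ck" on 16xx, "usb_l4_ick" on 24xx */
	unsigned	clk_requested:1; /* set while omap_udc_enable_clock(1) holds the clocks */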
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index acd101caeeeb..e0d4c2358b39 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -209,24 +209,16 @@ static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
209 209
210static int remote_wakeup_is_broken(struct uhci_hcd *uhci) 210static int remote_wakeup_is_broken(struct uhci_hcd *uhci)
211{ 211{
212 static struct dmi_system_id broken_wakeup_table[] = {
213 {
214 .ident = "Asus A7V8X",
215 .matches = {
216 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK"),
217 DMI_MATCH(DMI_BOARD_NAME, "A7V8X"),
218 DMI_MATCH(DMI_BOARD_VERSION, "REV 1.xx"),
219 }
220 },
221 { }
222 };
223 int port; 212 int port;
213 char *sys_info;
214 static char bad_Asus_board[] = "A7V8X";
224 215
225 /* One of Asus's motherboards has a bug which causes it to 216 /* One of Asus's motherboards has a bug which causes it to
226 * wake up immediately from suspend-to-RAM if any of the ports 217 * wake up immediately from suspend-to-RAM if any of the ports
227 * are connected. In such cases we will not set EGSM. 218 * are connected. In such cases we will not set EGSM.
228 */ 219 */
229 if (dmi_check_system(broken_wakeup_table)) { 220 sys_info = dmi_get_system_info(DMI_BOARD_NAME);
221 if (sys_info && !strcmp(sys_info, bad_Asus_board)) {
230 for (port = 0; port < uhci->rh_numports; ++port) { 222 for (port = 0; port < uhci->rh_numports; ++port) {
231 if (inw(uhci->io_addr + USBPORTSC1 + port * 2) & 223 if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
232 USBPORTSC_CCS) 224 USBPORTSC_CCS)
@@ -265,7 +257,9 @@ __acquires(uhci->lock)
265 int_enable = USBINTR_RESUME; 257 int_enable = USBINTR_RESUME;
266 if (remote_wakeup_is_broken(uhci)) 258 if (remote_wakeup_is_broken(uhci))
267 egsm_enable = 0; 259 egsm_enable = 0;
268 if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable) 260 if (resume_detect_interrupts_are_broken(uhci) || !egsm_enable ||
261 !device_may_wakeup(
262 &uhci_to_hcd(uhci)->self.root_hub->dev))
269 uhci->working_RD = int_enable = 0; 263 uhci->working_RD = int_enable = 0;
270 264
271 outw(int_enable, uhci->io_addr + USBINTR); 265 outw(int_enable, uhci->io_addr + USBINTR);
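
The first uhci hunk trades a dmi_check_system() table, which matched vendor, board, and version together, for a single dmi_get_system_info(DMI_BOARD_NAME) string compare: smaller, though it now matches any board named A7V8X regardless of vendor or revision. A hedged side-by-side of the two styles (kernel context, <linux/dmi.h>):

static const struct dmi_system_id broken_wakeup_table[] = {
	{
		.ident = "Asus A7V8X",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK"),
			DMI_MATCH(DMI_BOARD_NAME, "A7V8X"),
		},
	},
	{ }	/* zeroed terminator */
};

static int board_has_broken_wakeup(void)
{
	const char *board = dmi_get_system_info(DMI_BOARD_NAME);

	/* new style: one field, one strcmp() */
	if (board && !strcmp(board, "A7V8X"))
		return 1;

	/* old style: every DMI_MATCH field must match one table entry */
	return dmi_check_system(broken_wakeup_table) != 0;
}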
diff --git a/drivers/usb/input/Kconfig b/drivers/usb/input/Kconfig
index f877cd4f317a..c7d887540d8d 100644
--- a/drivers/usb/input/Kconfig
+++ b/drivers/usb/input/Kconfig
@@ -7,15 +7,13 @@ comment "USB Input Devices"
7config USB_HID 7config USB_HID
8 tristate "USB Human Interface Device (full HID) support" 8 tristate "USB Human Interface Device (full HID) support"
9 default y 9 default y
10 depends on USB 10 depends on USB && INPUT
11 select HID 11 select HID
12 ---help--- 12 ---help---
13 Say Y here if you want full HID support to connect USB keyboards, 13 Say Y here if you want full HID support to connect USB keyboards,
14 mice, joysticks, graphic tablets, or any other HID based devices 14 mice, joysticks, graphic tablets, or any other HID based devices
15 to your computer via USB. You also need to select HID Input layer 15 to your computer via USB, as well as Uninterruptible Power Supply
16 support (below) if you want to use keyboards, mice, joysticks and 16 (UPS) and monitor control devices.
17 the like ... as well as Uninterruptible Power Supply (UPS) and
18 monitor control devices.
19 17
20 You can't use this driver and the HIDBP (Boot Protocol) keyboard 18 You can't use this driver and the HIDBP (Boot Protocol) keyboard
21 and mouse drivers at the same time. More information is available: 19 and mouse drivers at the same time. More information is available:
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c
index 89fa6885709b..6e739efee6f7 100644
--- a/drivers/usb/input/hid-core.c
+++ b/drivers/usb/input/hid-core.c
@@ -106,18 +106,18 @@ static void hid_reset(struct work_struct *work)
106 106
107 if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) { 107 if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) {
108 dev_dbg(&usbhid->intf->dev, "clear halt\n"); 108 dev_dbg(&usbhid->intf->dev, "clear halt\n");
109 rc = usb_clear_halt(to_usb_device(hid->dev), usbhid->urbin->pipe); 109 rc = usb_clear_halt(hid_to_usb_dev(hid), usbhid->urbin->pipe);
110 clear_bit(HID_CLEAR_HALT, &usbhid->iofl); 110 clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
111 hid_start_in(hid); 111 hid_start_in(hid);
112 } 112 }
113 113
114 else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) { 114 else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
115 dev_dbg(&usbhid->intf->dev, "resetting device\n"); 115 dev_dbg(&usbhid->intf->dev, "resetting device\n");
116 rc = rc_lock = usb_lock_device_for_reset(to_usb_device(hid->dev), usbhid->intf); 116 rc = rc_lock = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
117 if (rc_lock >= 0) { 117 if (rc_lock >= 0) {
118 rc = usb_reset_composite_device(to_usb_device(hid->dev), usbhid->intf); 118 rc = usb_reset_composite_device(hid_to_usb_dev(hid), usbhid->intf);
119 if (rc_lock) 119 if (rc_lock)
120 usb_unlock_device(to_usb_device(hid->dev)); 120 usb_unlock_device(hid_to_usb_dev(hid));
121 } 121 }
122 clear_bit(HID_RESET_PENDING, &usbhid->iofl); 122 clear_bit(HID_RESET_PENDING, &usbhid->iofl);
123 } 123 }
@@ -129,8 +129,8 @@ static void hid_reset(struct work_struct *work)
129 break; 129 break;
130 default: 130 default:
131 err("can't reset device, %s-%s/input%d, status %d", 131 err("can't reset device, %s-%s/input%d, status %d",
132 to_usb_device(hid->dev)->bus->bus_name, 132 hid_to_usb_dev(hid)->bus->bus_name,
133 to_usb_device(hid->dev)->devpath, 133 hid_to_usb_dev(hid)->devpath,
134 usbhid->ifnum, rc); 134 usbhid->ifnum, rc);
135 /* FALLTHROUGH */ 135 /* FALLTHROUGH */
136 case -EHOSTUNREACH: 136 case -EHOSTUNREACH:
@@ -217,8 +217,8 @@ static void hid_irq_in(struct urb *urb)
217 clear_bit(HID_IN_RUNNING, &usbhid->iofl); 217 clear_bit(HID_IN_RUNNING, &usbhid->iofl);
218 if (status != -EPERM) { 218 if (status != -EPERM) {
219 err("can't resubmit intr, %s-%s/input%d, status %d", 219 err("can't resubmit intr, %s-%s/input%d, status %d",
220 to_usb_device(hid->dev)->bus->bus_name, 220 hid_to_usb_dev(hid)->bus->bus_name,
221 to_usb_device(hid->dev)->devpath, 221 hid_to_usb_dev(hid)->devpath,
222 usbhid->ifnum, status); 222 usbhid->ifnum, status);
223 hid_io_error(hid); 223 hid_io_error(hid);
224 } 224 }
@@ -251,7 +251,7 @@ static int hid_submit_out(struct hid_device *hid)
251 251
252 hid_output_report(report, usbhid->outbuf); 252 hid_output_report(report, usbhid->outbuf);
253 usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0); 253 usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
254 usbhid->urbout->dev = to_usb_device(hid->dev); 254 usbhid->urbout->dev = hid_to_usb_dev(hid);
255 255
256 dbg("submitting out urb"); 256 dbg("submitting out urb");
257 257
@@ -276,13 +276,13 @@ static int hid_submit_ctrl(struct hid_device *hid)
276 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 276 len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
277 if (dir == USB_DIR_OUT) { 277 if (dir == USB_DIR_OUT) {
278 hid_output_report(report, usbhid->ctrlbuf); 278 hid_output_report(report, usbhid->ctrlbuf);
279 usbhid->urbctrl->pipe = usb_sndctrlpipe(to_usb_device(hid->dev), 0); 279 usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
280 usbhid->urbctrl->transfer_buffer_length = len; 280 usbhid->urbctrl->transfer_buffer_length = len;
281 } else { 281 } else {
282 int maxpacket, padlen; 282 int maxpacket, padlen;
283 283
284 usbhid->urbctrl->pipe = usb_rcvctrlpipe(to_usb_device(hid->dev), 0); 284 usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
285 maxpacket = usb_maxpacket(to_usb_device(hid->dev), usbhid->urbctrl->pipe, 0); 285 maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe, 0);
286 if (maxpacket > 0) { 286 if (maxpacket > 0) {
287 padlen = (len + maxpacket - 1) / maxpacket; 287 padlen = (len + maxpacket - 1) / maxpacket;
288 padlen *= maxpacket; 288 padlen *= maxpacket;
@@ -292,7 +292,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
292 padlen = 0; 292 padlen = 0;
293 usbhid->urbctrl->transfer_buffer_length = padlen; 293 usbhid->urbctrl->transfer_buffer_length = padlen;
294 } 294 }
295 usbhid->urbctrl->dev = to_usb_device(hid->dev); 295 usbhid->urbctrl->dev = hid_to_usb_dev(hid);
296 296
297 usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir; 297 usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
298 usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT; 298 usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
@@ -582,6 +582,8 @@ void usbhid_init_reports(struct hid_device *hid)
582} 582}
583 583
584#define USB_VENDOR_ID_GTCO 0x078c 584#define USB_VENDOR_ID_GTCO 0x078c
585#define USB_VENDOR_ID_GTCO_IPANEL_1 0x08ca
586#define USB_VENDOR_ID_GTCO_IPANEL_2 0x5543
585#define USB_DEVICE_ID_GTCO_90 0x0090 587#define USB_DEVICE_ID_GTCO_90 0x0090
586#define USB_DEVICE_ID_GTCO_100 0x0100 588#define USB_DEVICE_ID_GTCO_100 0x0100
587#define USB_DEVICE_ID_GTCO_101 0x0101 589#define USB_DEVICE_ID_GTCO_101 0x0101
@@ -627,6 +629,9 @@ void usbhid_init_reports(struct hid_device *hid)
627#define USB_DEVICE_ID_GTCO_1004 0x1004 629#define USB_DEVICE_ID_GTCO_1004 0x1004
628#define USB_DEVICE_ID_GTCO_1005 0x1005 630#define USB_DEVICE_ID_GTCO_1005 0x1005
629#define USB_DEVICE_ID_GTCO_1006 0x1006 631#define USB_DEVICE_ID_GTCO_1006 0x1006
632#define USB_DEVICE_ID_GTCO_10 0x0010
633#define USB_DEVICE_ID_GTCO_8 0x0008
634#define USB_DEVICE_ID_GTCO_d 0x000d
630 635
631#define USB_VENDOR_ID_WACOM 0x056a 636#define USB_VENDOR_ID_WACOM 0x056a
632 637
@@ -875,6 +880,9 @@ static const struct hid_blacklist {
875 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004, HID_QUIRK_IGNORE }, 880 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004, HID_QUIRK_IGNORE },
876 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005, HID_QUIRK_IGNORE }, 881 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005, HID_QUIRK_IGNORE },
877 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006, HID_QUIRK_IGNORE }, 882 { USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006, HID_QUIRK_IGNORE },
883 { USB_VENDOR_ID_GTCO_IPANEL_1, USB_DEVICE_ID_GTCO_10, HID_QUIRK_IGNORE },
884 { USB_VENDOR_ID_GTCO_IPANEL_2, USB_DEVICE_ID_GTCO_8, HID_QUIRK_IGNORE },
885 { USB_VENDOR_ID_GTCO_IPANEL_2, USB_DEVICE_ID_GTCO_d, HID_QUIRK_IGNORE },
878 { USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE }, 886 { USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO, HID_QUIRK_IGNORE },
879 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE }, 887 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY, HID_QUIRK_IGNORE },
880 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY, HID_QUIRK_IGNORE }, 888 { USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY, HID_QUIRK_IGNORE },
@@ -951,7 +959,7 @@ static const struct hid_blacklist {
951 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD}, 959 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
952 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 960 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
953 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN }, 961 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI, HID_QUIRK_POWERBOOK_HAS_FN },
954 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN }, 962 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_POWERBOOK_ISO_KEYBOARD},
955 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN }, 963 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS, HID_QUIRK_POWERBOOK_HAS_FN },
956 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 964 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
957 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN }, 965 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN },
@@ -1187,7 +1195,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
1187 1195
1188 hid->version = le16_to_cpu(hdesc->bcdHID); 1196 hid->version = le16_to_cpu(hdesc->bcdHID);
1189 hid->country = hdesc->bCountryCode; 1197 hid->country = hdesc->bCountryCode;
1190 hid->dev = &dev->dev; 1198 hid->dev = &intf->dev;
1191 usbhid->intf = intf; 1199 usbhid->intf = intf;
1192 usbhid->ifnum = interface->desc.bInterfaceNumber; 1200 usbhid->ifnum = interface->desc.bInterfaceNumber;
1193 1201
@@ -1282,7 +1290,7 @@ static void hid_disconnect(struct usb_interface *intf)
1282 usb_free_urb(usbhid->urbctrl); 1290 usb_free_urb(usbhid->urbctrl);
1283 usb_free_urb(usbhid->urbout); 1291 usb_free_urb(usbhid->urbout);
1284 1292
1285 hid_free_buffers(to_usb_device(hid->dev), hid); 1293 hid_free_buffers(hid_to_usb_dev(hid), hid);
1286 hid_free_device(hid); 1294 hid_free_device(hid);
1287} 1295}
1288 1296
diff --git a/drivers/usb/input/hid-ff.c b/drivers/usb/input/hid-ff.c
index f8f660ee3fac..59ed65e7a621 100644
--- a/drivers/usb/input/hid-ff.c
+++ b/drivers/usb/input/hid-ff.c
@@ -33,6 +33,7 @@
33#include <linux/usb.h> 33#include <linux/usb.h>
34 34
35#include <linux/hid.h> 35#include <linux/hid.h>
36#include "usbhid.h"
36 37
37/* 38/*
38 * This table contains pointers to initializers. To add support for new 39 * This table contains pointers to initializers. To add support for new
@@ -70,8 +71,8 @@ static struct hid_ff_initializer inits[] = {
70int hid_ff_init(struct hid_device* hid) 71int hid_ff_init(struct hid_device* hid)
71{ 72{
72 struct hid_ff_initializer *init; 73 struct hid_ff_initializer *init;
73 int vendor = le16_to_cpu(to_usb_device(hid->dev)->descriptor.idVendor); 74 int vendor = le16_to_cpu(hid_to_usb_dev(hid)->descriptor.idVendor);
74 int product = le16_to_cpu(to_usb_device(hid->dev)->descriptor.idProduct); 75 int product = le16_to_cpu(hid_to_usb_dev(hid)->descriptor.idProduct);
75 76
76 for (init = inits; init->idVendor; init++) 77 for (init = inits; init->idVendor; init++)
77 if (init->idVendor == vendor && init->idProduct == product) 78 if (init->idVendor == vendor && init->idProduct == product)
diff --git a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c
index 114d6c9f64b1..a8b3d66cd498 100644
--- a/drivers/usb/input/hiddev.c
+++ b/drivers/usb/input/hiddev.c
@@ -384,7 +384,7 @@ static int hiddev_ioctl(struct inode *inode, struct file *file, unsigned int cmd
384 struct hiddev_list *list = file->private_data; 384 struct hiddev_list *list = file->private_data;
385 struct hiddev *hiddev = list->hiddev; 385 struct hiddev *hiddev = list->hiddev;
386 struct hid_device *hid = hiddev->hid; 386 struct hid_device *hid = hiddev->hid;
387 struct usb_device *dev = to_usb_device(hid->dev); 387 struct usb_device *dev = hid_to_usb_dev(hid);
388 struct hiddev_collection_info cinfo; 388 struct hiddev_collection_info cinfo;
389 struct hiddev_report_info rinfo; 389 struct hiddev_report_info rinfo;
390 struct hiddev_field_info finfo; 390 struct hiddev_field_info finfo;
diff --git a/drivers/usb/input/usbhid.h b/drivers/usb/input/usbhid.h
index 830107e5251f..0023f96d4294 100644
--- a/drivers/usb/input/usbhid.h
+++ b/drivers/usb/input/usbhid.h
@@ -80,5 +80,8 @@ struct usbhid_device {
80 80
81}; 81};
82 82
83#define hid_to_usb_dev(hid_dev) \
84 container_of(hid_dev->dev->parent, struct usb_device, dev)
85
83#endif 86#endif
84 87
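
hid_to_usb_dev() works because hid-core now stores &intf->dev in hid->dev (see the usb_hid_configure hunk above), and a USB interface's parent device is the one embedded in struct usb_device; container_of() walks back from that member to the enclosing structure. A runnable userspace reduction of the idiom, with stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct usb_device { char name[16]; struct device dev; };

int main(void)
{
	struct usb_device udev = { .name = "usb1", .dev = { .id = 7 } };
	struct device *d = &udev.dev;

	/* recover the usb_device that embeds d */
	struct usb_device *back = container_of(d, struct usb_device, dev);
	printf("%s\n", back->name);	/* prints "usb1" */
	return 0;
}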
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index bf26c3c56990..9148694627d5 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -403,7 +403,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
403 403
404 404
405 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), 405 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
406 (u32)SISUSB_HADDR(x, y), 2, &written); 406 (long)SISUSB_HADDR(x, y), 2, &written);
407 407
408 mutex_unlock(&sisusb->lock); 408 mutex_unlock(&sisusb->lock);
409} 409}
@@ -438,7 +438,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
438 } 438 }
439 439
440 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y), 440 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
441 (u32)SISUSB_HADDR(x, y), count * 2, &written); 441 (long)SISUSB_HADDR(x, y), count * 2, &written);
442 442
443 mutex_unlock(&sisusb->lock); 443 mutex_unlock(&sisusb->lock);
444} 444}
@@ -492,7 +492,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
492 492
493 493
494 sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y), 494 sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
495 (u32)SISUSB_HADDR(x, y), length, &written); 495 (long)SISUSB_HADDR(x, y), length, &written);
496 496
497 mutex_unlock(&sisusb->lock); 497 mutex_unlock(&sisusb->lock);
498} 498}
@@ -564,7 +564,7 @@ sisusbcon_bmove(struct vc_data *c, int sy, int sx,
564 564
565 565
566 sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy), 566 sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy),
567 (u32)SISUSB_HADDR(dx, dy), length, &written); 567 (long)SISUSB_HADDR(dx, dy), length, &written);
568 568
569 mutex_unlock(&sisusb->lock); 569 mutex_unlock(&sisusb->lock);
570} 570}
@@ -612,7 +612,7 @@ sisusbcon_switch(struct vc_data *c)
612 length); 612 length);
613 613
614 sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin, 614 sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
615 (u32)SISUSB_HADDR(0, 0), 615 (long)SISUSB_HADDR(0, 0),
616 length, &written); 616 length, &written);
617 617
618 mutex_unlock(&sisusb->lock); 618 mutex_unlock(&sisusb->lock);
@@ -939,7 +939,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
939 } 939 }
940 940
941 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t), 941 sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
942 (u32)SISUSB_HADDR(0, t), length, &written); 942 (long)SISUSB_HADDR(0, t), length, &written);
943 943
944 mutex_unlock(&sisusb->lock); 944 mutex_unlock(&sisusb->lock);
945 945
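
Every sisusb hunk makes the same fix: SISUSB_HADDR() yields a pointer-sized value, and casting it to u32 silently drops the upper 32 bits on a 64-bit kernel, while (long) keeps the full width (on Linux, long and void * have the same size). A small LP64 demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x12345678abcdef00UL;	/* needs a 64-bit long */

	printf("as u32:  0x%x\n", (unsigned int)addr);	/* 0xabcdef00: truncated */
	printf("as long: 0x%lx\n", addr);		/* full address preserved */
	return 0;
}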
diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c
index 95e682e2c9d6..f538013965b0 100644
--- a/drivers/usb/net/asix.c
+++ b/drivers/usb/net/asix.c
@@ -920,7 +920,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
920 goto out2; 920 goto out2;
921 921
922 if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 922 if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT,
923 0x0000, 0, 0, buf)) < 0) { 923 1, 0, 0, buf)) < 0) {
924 dbg("Select PHY #1 failed: %d", ret); 924 dbg("Select PHY #1 failed: %d", ret);
925 goto out2; 925 goto out2;
926 } 926 }
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d3be9214c7c1..31501c9361b9 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -32,7 +32,7 @@ static int funsoft_ioctl(struct usb_serial_port *port, struct file *file,
32 dbg("%s - port %d, cmd 0x%04x", __FUNCTION__, port->number, cmd); 32 dbg("%s - port %d, cmd 0x%04x", __FUNCTION__, port->number, cmd);
33 33
34 if (cmd == TCSETSF) { 34 if (cmd == TCSETSF) {
35 if (user_termios_to_kernel_termios(&t, (void __user *)arg)) 35 if (user_termios_to_kernel_termios(&t, (struct termios __user *)arg))
36 return -EFAULT; 36 return -EFAULT;
37 37
38 dbg("%s - iflag:%x oflag:%x cflag:%x lflag:%x", __FUNCTION__, 38 dbg("%s - iflag:%x oflag:%x cflag:%x lflag:%x", __FUNCTION__,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 819266b7e2f8..5ca04e82ea19 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -625,6 +625,9 @@ static int option_send_setup(struct usb_serial_port *port)
625 625
626 dbg("%s", __FUNCTION__); 626 dbg("%s", __FUNCTION__);
627 627
628 if (port->number != 0)
629 return 0;
630
628 portdata = usb_get_serial_port_data(port); 631 portdata = usb_get_serial_port_data(port);
629 632
630 if (port->tty) { 633 if (port->tty) {
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 5fe7ff441a09..cddef3efba0a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -728,7 +728,7 @@ UNUSUAL_DEV( 0x05ac, 0x1204, 0x0000, 0x9999,
728 "Apple", 728 "Apple",
729 "iPod", 729 "iPod",
730 US_SC_DEVICE, US_PR_DEVICE, NULL, 730 US_SC_DEVICE, US_PR_DEVICE, NULL,
731 US_FL_FIX_CAPACITY ), 731 US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
732 732
733UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999, 733UNUSUAL_DEV( 0x05ac, 0x1205, 0x0000, 0x9999,
734 "Apple", 734 "Apple",
@@ -1358,6 +1358,21 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
1358 US_SC_DEVICE, US_PR_DEVICE, NULL, 1358 US_SC_DEVICE, US_PR_DEVICE, NULL,
1359 US_FL_IGNORE_RESIDUE ), 1359 US_FL_IGNORE_RESIDUE ),
1360 1360
1361/* Reported by Francesco Foresti <frafore@tiscali.it> */
1362UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1363 "Super Top",
1364 "IDE DEVICE",
1365 US_SC_DEVICE, US_PR_DEVICE, NULL,
1366 US_FL_IGNORE_RESIDUE ),
1367
1368/* Reported by Robert Schedel <r.schedel@yahoo.de>
1369 * Note: this is a 'super top' device like the above 14cd/6600 device */
1370UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
1371 "Teac",
1372 "HD-35PUK-B",
1373 US_SC_DEVICE, US_PR_DEVICE, NULL,
1374 US_FL_IGNORE_RESIDUE ),
1375
1361/* patch submitted by Davide Perini <perini.davide@dpsoftware.org> 1376/* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
1362 * and Renato Perini <rperini@email.it> 1377 * and Renato Perini <rperini@email.it>
1363 */ 1378 */
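
For anyone adding a quirk like the two above, this is the shape of an UNUSUAL_DEV() entry, annotated field by field. The IDs here are placeholders, not a real device, and the table must stay sorted by vendor then product ID:

UNUSUAL_DEV( 0x1234, 0x5678,		/* idVendor, idProduct */
		0x0100, 0x0100,		/* bcdDevice range: min, max */
		"Example Vendor",	/* manufacturer string */
		"Example Device",	/* product string */
		US_SC_DEVICE, US_PR_DEVICE, NULL,	/* subclass, protocol, init function */
		US_FL_IGNORE_RESIDUE ),	/* quirk flags, OR'ed together */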
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 61587ca2cdbb..fde1d9518123 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -121,7 +121,7 @@ static int corgibl_probe(struct platform_device *pdev)
121 machinfo->limit_mask = -1; 121 machinfo->limit_mask = -1;
122 122
123 corgi_backlight_device = backlight_device_register ("corgi-bl", 123 corgi_backlight_device = backlight_device_register ("corgi-bl",
124 NULL, &corgibl_data); 124 &pdev->dev, NULL, &corgibl_data);
125 if (IS_ERR (corgi_backlight_device)) 125 if (IS_ERR (corgi_backlight_device))
126 return PTR_ERR (corgi_backlight_device); 126 return PTR_ERR (corgi_backlight_device);
127 127
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 1c569fb543ae..c07d8207fb54 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -105,7 +105,7 @@ static struct backlight_properties hp680bl_data = {
105static int __init hp680bl_probe(struct platform_device *dev) 105static int __init hp680bl_probe(struct platform_device *dev)
106{ 106{
107 hp680_backlight_device = backlight_device_register ("hp680-bl", 107 hp680_backlight_device = backlight_device_register ("hp680-bl",
108 NULL, &hp680bl_data); 108 &dev->dev, NULL, &hp680bl_data);
109 if (IS_ERR (hp680_backlight_device)) 109 if (IS_ERR (hp680_backlight_device))
110 return PTR_ERR (hp680_backlight_device); 110 return PTR_ERR (hp680_backlight_device);
111 111
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 2d7905410b2a..fc812d96c31d 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -184,7 +184,7 @@ static int locomolcd_probe(struct locomo_dev *ldev)
184 184
185 local_irq_restore(flags); 185 local_irq_restore(flags);
186 186
187 locomolcd_bl_device = backlight_device_register("locomo-bl", NULL, &locomobl_data); 187 locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data);
188 188
189 if (IS_ERR (locomolcd_bl_device)) 189 if (IS_ERR (locomolcd_bl_device))
190 return PTR_ERR (locomolcd_bl_device); 190 return PTR_ERR (locomolcd_bl_device);
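
All three backlight hunks make the same interface change: backlight_device_register() now takes a parent struct device instead of NULL, so the new class device shows up under its real parent in sysfs. A hedged sketch of the updated call from a platform driver's probe; example_probe and my_bl_data (a struct backlight_properties) are hypothetical stand-ins:

static struct backlight_device *bl;

static int example_probe(struct platform_device *pdev)
{
	/* the parent device is the new second argument */
	bl = backlight_device_register("example-bl", &pdev->dev,
					NULL, &my_bl_data);
	if (IS_ERR(bl))
		return PTR_ERR(bl);
	return 0;
}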