aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/dock.c11
-rw-r--r--drivers/acpi/ec.c36
-rw-r--r--drivers/acpi/executer/exconfig.c3
-rw-r--r--drivers/acpi/namespace/nsnames.c34
-rw-r--r--drivers/acpi/pci_link.c12
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/resources/rscalc.c3
-rw-r--r--drivers/acpi/utilities/utalloc.c8
-rw-r--r--drivers/acpi/utilities/utdelete.c13
-rw-r--r--drivers/acpi/utilities/utobject.c13
-rw-r--r--drivers/acpi/wmi.c2
-rw-r--r--drivers/cdrom/cdrom.c7
-rw-r--r--drivers/cdrom/gdrom.c7
-rw-r--r--drivers/cdrom/viocd.c7
-rw-r--r--drivers/char/agp/agp.h3
-rw-r--r--drivers/char/agp/ali-agp.c10
-rw-r--r--drivers/char/agp/amd-k7-agp.c10
-rw-r--r--drivers/char/agp/amd64-agp.c51
-rw-r--r--drivers/char/agp/ati-agp.c7
-rw-r--r--drivers/char/agp/backend.c28
-rw-r--r--drivers/char/agp/generic.c41
-rw-r--r--drivers/char/agp/intel-agp.c83
-rw-r--r--drivers/char/agp/isoch.c37
-rw-r--r--drivers/char/agp/sis-agp.c17
-rw-r--r--drivers/char/agp/sworks-agp.c25
-rw-r--r--drivers/char/agp/uninorth-agp.c32
-rw-r--r--drivers/char/hvc_console.c5
-rw-r--r--drivers/char/hw_random/via-rng.c8
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c1
-rw-r--r--drivers/char/rtc.c1
-rw-r--r--drivers/char/synclink_gt.c1
-rw-r--r--drivers/char/tty_io.c74
-rw-r--r--drivers/char/vt.c82
-rw-r--r--drivers/char/vt_ioctl.c4
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c1
-rw-r--r--drivers/cpuidle/governors/ladder.c26
-rw-r--r--drivers/cpuidle/governors/menu.c42
-rw-r--r--drivers/cpuidle/sysfs.c29
-rw-r--r--drivers/crypto/padlock-aes.c28
-rw-r--r--drivers/crypto/padlock-sha.c9
-rw-r--r--drivers/crypto/talitos.c54
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/firmware/memmap.c61
-rw-r--r--drivers/hid/usbhid/hid-quirks.c12
-rw-r--r--drivers/hwmon/Kconfig16
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru3.c134
-rw-r--r--drivers/hwmon/adcxx.c329
-rw-r--r--drivers/hwmon/applesmc.c20
-rw-r--r--drivers/hwmon/coretemp.c5
-rw-r--r--drivers/hwmon/hwmon-vid.c36
-rw-r--r--drivers/hwmon/i5k_amb.c28
-rw-r--r--drivers/hwmon/ibmaem.c27
-rw-r--r--drivers/hwmon/lm75.c114
-rw-r--r--drivers/hwmon/w83791d.c3
-rw-r--r--drivers/i2c/Kconfig14
-rw-r--r--drivers/i2c/algos/Kconfig11
-rw-r--r--drivers/i2c/busses/i2c-amd756-s4882.c9
-rw-r--r--drivers/i2c/busses/i2c-nforce2-s4985.c5
-rw-r--r--drivers/i2c/chips/at24.c8
-rw-r--r--drivers/i2c/chips/isp1301_omap.c2
-rw-r--r--drivers/i2c/i2c-core.c11
-rw-r--r--drivers/i2c/i2c-dev.c4
-rw-r--r--drivers/ide/ide-cd.c20
-rw-r--r--drivers/ide/pci/aec62xx.c2
-rw-r--r--drivers/ide/pci/cy82c693.c2
-rw-r--r--drivers/ide/pci/hpt366.c2
-rw-r--r--drivers/ide/pci/it821x.c2
-rw-r--r--drivers/ide/pci/pdc202xx_new.c2
-rw-r--r--drivers/ide/pci/scc_pata.c2
-rw-r--r--drivers/ide/pci/sgiioc4.c4
-rw-r--r--drivers/ide/pci/siimage.c2
-rw-r--r--drivers/ide/pci/sis5513.c2
-rw-r--r--drivers/ide/pci/tc86c001.c2
-rw-r--r--drivers/ide/pci/via82cxxx.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c48
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c60
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c17
-rw-r--r--drivers/input/evdev.c63
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/gpio_keys.c3
-rw-r--r--drivers/input/mouse/Kconfig23
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/bcm5974.c681
-rw-r--r--drivers/input/serio/i8042-sparcio.h3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/xilinx_ps2.c4
-rw-r--r--drivers/input/touchscreen/Kconfig21
-rw-r--r--drivers/lguest/page_tables.c25
-rw-r--r--drivers/md/md.c33
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c32
-rw-r--r--drivers/mfd/Kconfig21
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/t7l66xb.c419
-rw-r--r--drivers/mfd/tc6387xb.c181
-rw-r--r--drivers/mfd/tc6393xb.c159
-rw-r--r--drivers/misc/acer-wmi.c19
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/mmc/host/Kconfig6
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/s3cmci.c17
-rw-r--r--drivers/mmc/host/sdricoh_cs.c1
-rw-r--r--drivers/mmc/host/tmio_mmc.c691
-rw-r--r--drivers/mmc/host/tmio_mmc.h194
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/orion_nand.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c556
-rw-r--r--drivers/net/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/bnx2x.h87
-rw-r--r--drivers/net/bnx2x_fw_defs.h160
-rw-r--r--drivers/net/bnx2x_hsi.h16
-rw-r--r--drivers/net/bnx2x_init.h26
-rw-r--r--drivers/net/bnx2x_init_values.h533
-rw-r--r--drivers/net/bnx2x_link.c1258
-rw-r--r--drivers/net/bnx2x_link.h11
-rw-r--r--drivers/net/bnx2x_main.c1212
-rw-r--r--drivers/net/bnx2x_reg.h210
-rw-r--r--drivers/pci/msi.c5
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/probe.c54
-rw-r--r--drivers/pci/quirks.c7
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c49
-rw-r--r--drivers/rtc/rtc-dev.c12
-rw-r--r--drivers/rtc/rtc-isl1208.c2
-rw-r--r--drivers/sbus/sbus.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c2
-rw-r--r--drivers/serial/Kconfig1
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/sunsab.c2
-rw-r--r--drivers/serial/sunsu.c2
-rw-r--r--drivers/serial/sunzilog.c2
-rw-r--r--drivers/spi/spi.c40
-rw-r--r--drivers/usb/Kconfig6
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/class/cdc-acm.c86
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/core/driver.c5
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/gadget/Kconfig10
-rw-r--r--drivers/usb/gadget/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/f_acm.c196
-rw-r--r--drivers/usb/gadget/f_ecm.c2
-rw-r--r--drivers/usb/gadget/f_rndis.c2
-rw-r--r--drivers/usb/gadget/f_serial.c2
-rw-r--r--drivers/usb/gadget/f_subset.c2
-rw-r--r--drivers/usb/gadget/gadget_chips.h6
-rw-r--r--drivers/usb/gadget/omap_udc.c5
-rw-r--r--drivers/usb/gadget/u_serial.c290
-rw-r--r--drivers/usb/gadget/u_serial.h12
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/isp1760-hcd.c53
-rw-r--r--drivers/usb/host/isp1760-hcd.h5
-rw-r--r--drivers/usb/host/ohci-hcd.c23
-rw-r--r--drivers/usb/host/ohci-hub.c11
-rw-r--r--drivers/usb/host/ohci-omap.c3
-rw-r--r--drivers/usb/host/ohci-pci.c132
-rw-r--r--drivers/usb/host/ohci-q.c6
-rw-r--r--drivers/usb/host/ohci.h11
-rw-r--r--drivers/usb/host/r8a66597-hcd.c49
-rw-r--r--drivers/usb/misc/Kconfig10
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/auerswald.c2152
-rw-r--r--drivers/usb/misc/isight_firmware.c4
-rw-r--r--drivers/usb/musb/Kconfig176
-rw-r--r--drivers/usb/musb/Makefile86
-rw-r--r--drivers/usb/musb/cppi_dma.c1540
-rw-r--r--drivers/usb/musb/cppi_dma.h133
-rw-r--r--drivers/usb/musb/davinci.c462
-rw-r--r--drivers/usb/musb/davinci.h100
-rw-r--r--drivers/usb/musb/musb_core.c2261
-rw-r--r--drivers/usb/musb/musb_core.h507
-rw-r--r--drivers/usb/musb/musb_debug.h66
-rw-r--r--drivers/usb/musb/musb_dma.h172
-rw-r--r--drivers/usb/musb/musb_gadget.c2031
-rw-r--r--drivers/usb/musb/musb_gadget.h108
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c981
-rw-r--r--drivers/usb/musb/musb_host.c2170
-rw-r--r--drivers/usb/musb/musb_host.h110
-rw-r--r--drivers/usb/musb/musb_io.h115
-rw-r--r--drivers/usb/musb/musb_procfs.c830
-rw-r--r--drivers/usb/musb/musb_regs.h300
-rw-r--r--drivers/usb/musb/musb_virthub.c425
-rw-r--r--drivers/usb/musb/musbhsdma.c433
-rw-r--r--drivers/usb/musb/omap2430.c324
-rw-r--r--drivers/usb/musb/omap2430.h56
-rw-r--r--drivers/usb/musb/tusb6010.c1151
-rw-r--r--drivers/usb/musb/tusb6010.h233
-rw-r--r--drivers/usb/musb/tusb6010_omap.c719
-rw-r--r--drivers/usb/serial/Kconfig7
-rw-r--r--drivers/usb/serial/ftdi_sio.c6
-rw-r--r--drivers/usb/serial/ftdi_sio.h7
-rw-r--r--drivers/usb/serial/option.c44
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h4
-rw-r--r--drivers/usb/serial/sierra.c170
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/storage/Kconfig12
-rw-r--r--drivers/usb/storage/Makefile1
-rw-r--r--drivers/usb/storage/sierra_ms.c207
-rw-r--r--drivers/usb/storage/sierra_ms.h4
-rw-r--r--drivers/usb/storage/transport.c17
-rw-r--r--drivers/usb/storage/unusual_devs.h40
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/atmel_lcdfb.c13
-rw-r--r--drivers/video/aty/radeon_accel.c8
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/console/fbcon.h2
-rw-r--r--drivers/video/fsl-diu-fb.c32
-rw-r--r--drivers/video/matrox/i2c-matroxfb.c21
-rw-r--r--drivers/video/matrox/matroxfb_maven.c97
-rw-r--r--drivers/video/pxafb.c68
-rw-r--r--drivers/watchdog/Kconfig5
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c24
-rw-r--r--drivers/watchdog/txx9wdt.c2
-rw-r--r--drivers/watchdog/wdt285.c2
230 files changed, 23348 insertions, 4855 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index a280ab3d0833..2735bde73475 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
57obj-$(CONFIG_PARIDE) += block/paride/ 57obj-$(CONFIG_PARIDE) += block/paride/
58obj-$(CONFIG_TC) += tc/ 58obj-$(CONFIG_TC) += tc/
59obj-$(CONFIG_USB) += usb/ 59obj-$(CONFIG_USB) += usb/
60obj-$(CONFIG_USB_MUSB_HDRC) += usb/musb/
60obj-$(CONFIG_PCI) += usb/ 61obj-$(CONFIG_PCI) += usb/
61obj-$(CONFIG_USB_GADGET) += usb/gadget/ 62obj-$(CONFIG_USB_GADGET) += usb/gadget/
62obj-$(CONFIG_SERIO) += input/serio/ 63obj-$(CONFIG_SERIO) += input/serio/
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bb7c51f712bd..7d2edf143f16 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -563,9 +563,6 @@ EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
563 */ 563 */
564static int handle_eject_request(struct dock_station *ds, u32 event) 564static int handle_eject_request(struct dock_station *ds, u32 event)
565{ 565{
566 if (!dock_present(ds))
567 return -ENODEV;
568
569 if (dock_in_progress(ds)) 566 if (dock_in_progress(ds))
570 return -EBUSY; 567 return -EBUSY;
571 568
@@ -573,8 +570,16 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
573 * here we need to generate the undock 570 * here we need to generate the undock
574 * event prior to actually doing the undock 571 * event prior to actually doing the undock
575 * so that the device struct still exists. 572 * so that the device struct still exists.
573 * Also, even send the dock event if the
574 * device is not present anymore
576 */ 575 */
577 dock_event(ds, event, UNDOCK_EVENT); 576 dock_event(ds, event, UNDOCK_EVENT);
577
578 if (!dock_present(ds)) {
579 complete_undock(ds);
580 return -ENODEV;
581 }
582
578 hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST); 583 hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
579 undock(ds); 584 undock(ds);
580 eject_dock(ds); 585 eject_dock(ds);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 5622aee996b2..13593f9f2197 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -110,6 +110,31 @@ static struct acpi_ec {
110 u8 handlers_installed; 110 u8 handlers_installed;
111} *boot_ec, *first_ec; 111} *boot_ec, *first_ec;
112 112
113/*
114 * Some Asus system have exchanged ECDT data/command IO addresses.
115 */
116static int print_ecdt_error(const struct dmi_system_id *id)
117{
118 printk(KERN_NOTICE PREFIX "%s detected - "
119 "ECDT has exchanged control/data I/O address\n",
120 id->ident);
121 return 0;
122}
123
124static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
125 {
126 print_ecdt_error, "Asus L4R", {
127 DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
128 DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
129 DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
130 {
131 print_ecdt_error, "Asus M6R", {
132 DMI_MATCH(DMI_BIOS_VERSION, "0207"),
133 DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
134 DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
135 {},
136};
137
113/* -------------------------------------------------------------------------- 138/* --------------------------------------------------------------------------
114 Transaction Management 139 Transaction Management
115 -------------------------------------------------------------------------- */ 140 -------------------------------------------------------------------------- */
@@ -196,6 +221,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
196 return 0; 221 return 0;
197 msleep(1); 222 msleep(1);
198 } 223 }
224 if (acpi_ec_check_status(ec,event))
225 return 0;
199 } 226 }
200 pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n", 227 pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
201 acpi_ec_read_status(ec), 228 acpi_ec_read_status(ec),
@@ -911,6 +938,15 @@ int __init acpi_ec_ecdt_probe(void)
911 pr_info(PREFIX "EC description table is found, configuring boot EC\n"); 938 pr_info(PREFIX "EC description table is found, configuring boot EC\n");
912 boot_ec->command_addr = ecdt_ptr->control.address; 939 boot_ec->command_addr = ecdt_ptr->control.address;
913 boot_ec->data_addr = ecdt_ptr->data.address; 940 boot_ec->data_addr = ecdt_ptr->data.address;
941 if (dmi_check_system(ec_dmi_table)) {
942 /*
943 * If the board falls into ec_dmi_table, it means
944 * that ECDT table gives the incorrect command/status
945 * & data I/O address. Just fix it.
946 */
947 boot_ec->data_addr = ecdt_ptr->control.address;
948 boot_ec->command_addr = ecdt_ptr->data.address;
949 }
914 boot_ec->gpe = ecdt_ptr->gpe; 950 boot_ec->gpe = ecdt_ptr->gpe;
915 boot_ec->handle = ACPI_ROOT_OBJECT; 951 boot_ec->handle = ACPI_ROOT_OBJECT;
916 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 952 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 2a32c843cb4a..8892b9824fae 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -479,5 +479,8 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
479 479
480 acpi_tb_set_table_loaded_flag(table_index, FALSE); 480 acpi_tb_set_table_loaded_flag(table_index, FALSE);
481 481
482 /* Table unloaded, remove a reference to the ddb_handle object */
483
484 acpi_ut_remove_reference(ddb_handle);
482 return_ACPI_STATUS(AE_OK); 485 return_ACPI_STATUS(AE_OK);
483} 486}
diff --git a/drivers/acpi/namespace/nsnames.c b/drivers/acpi/namespace/nsnames.c
index 549db42f16cf..bd5773878009 100644
--- a/drivers/acpi/namespace/nsnames.c
+++ b/drivers/acpi/namespace/nsnames.c
@@ -56,13 +56,14 @@ ACPI_MODULE_NAME("nsnames")
56 * Size - Size of the pathname 56 * Size - Size of the pathname
57 * *name_buffer - Where to return the pathname 57 * *name_buffer - Where to return the pathname
58 * 58 *
59 * RETURN: Places the pathname into the name_buffer, in external format 59 * RETURN: Status
60 * Places the pathname into the name_buffer, in external format
60 * (name segments separated by path separators) 61 * (name segments separated by path separators)
61 * 62 *
62 * DESCRIPTION: Generate a full pathaname 63 * DESCRIPTION: Generate a full pathaname
63 * 64 *
64 ******************************************************************************/ 65 ******************************************************************************/
65void 66acpi_status
66acpi_ns_build_external_path(struct acpi_namespace_node *node, 67acpi_ns_build_external_path(struct acpi_namespace_node *node,
67 acpi_size size, char *name_buffer) 68 acpi_size size, char *name_buffer)
68{ 69{
@@ -77,7 +78,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
77 if (index < ACPI_NAME_SIZE) { 78 if (index < ACPI_NAME_SIZE) {
78 name_buffer[0] = AML_ROOT_PREFIX; 79 name_buffer[0] = AML_ROOT_PREFIX;
79 name_buffer[1] = 0; 80 name_buffer[1] = 0;
80 return; 81 return (AE_OK);
81 } 82 }
82 83
83 /* Store terminator byte, then build name backwards */ 84 /* Store terminator byte, then build name backwards */
@@ -105,11 +106,13 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
105 106
106 if (index != 0) { 107 if (index != 0) {
107 ACPI_ERROR((AE_INFO, 108 ACPI_ERROR((AE_INFO,
108 "Could not construct pathname; index=%X, size=%X, Path=%s", 109 "Could not construct external pathname; index=%X, size=%X, Path=%s",
109 (u32) index, (u32) size, &name_buffer[size])); 110 (u32) index, (u32) size, &name_buffer[size]));
111
112 return (AE_BAD_PARAMETER);
110 } 113 }
111 114
112 return; 115 return (AE_OK);
113} 116}
114 117
115#ifdef ACPI_DEBUG_OUTPUT 118#ifdef ACPI_DEBUG_OUTPUT
@@ -129,6 +132,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
129 132
130char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) 133char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
131{ 134{
135 acpi_status status;
132 char *name_buffer; 136 char *name_buffer;
133 acpi_size size; 137 acpi_size size;
134 138
@@ -138,8 +142,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
138 142
139 size = acpi_ns_get_pathname_length(node); 143 size = acpi_ns_get_pathname_length(node);
140 if (!size) { 144 if (!size) {
141 ACPI_ERROR((AE_INFO, "Invalid node failure")); 145 return (NULL);
142 return_PTR(NULL);
143 } 146 }
144 147
145 /* Allocate a buffer to be returned to caller */ 148 /* Allocate a buffer to be returned to caller */
@@ -152,7 +155,11 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
152 155
153 /* Build the path in the allocated buffer */ 156 /* Build the path in the allocated buffer */
154 157
155 acpi_ns_build_external_path(node, size, name_buffer); 158 status = acpi_ns_build_external_path(node, size, name_buffer);
159 if (ACPI_FAILURE(status)) {
160 return (NULL);
161 }
162
156 return_PTR(name_buffer); 163 return_PTR(name_buffer);
157} 164}
158#endif 165#endif
@@ -186,7 +193,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
186 while (next_node && (next_node != acpi_gbl_root_node)) { 193 while (next_node && (next_node != acpi_gbl_root_node)) {
187 if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) { 194 if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
188 ACPI_ERROR((AE_INFO, 195 ACPI_ERROR((AE_INFO,
189 "Invalid NS Node (%p) while traversing path", 196 "Invalid Namespace Node (%p) while traversing namespace",
190 next_node)); 197 next_node));
191 return 0; 198 return 0;
192 } 199 }
@@ -234,8 +241,7 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
234 241
235 required_size = acpi_ns_get_pathname_length(node); 242 required_size = acpi_ns_get_pathname_length(node);
236 if (!required_size) { 243 if (!required_size) {
237 ACPI_ERROR((AE_INFO, "Invalid node failure")); 244 return_ACPI_STATUS(AE_BAD_PARAMETER);
238 return_ACPI_STATUS(AE_ERROR);
239 } 245 }
240 246
241 /* Validate/Allocate/Clear caller buffer */ 247 /* Validate/Allocate/Clear caller buffer */
@@ -247,7 +253,11 @@ acpi_ns_handle_to_pathname(acpi_handle target_handle,
247 253
248 /* Build the path in the caller buffer */ 254 /* Build the path in the caller buffer */
249 255
250 acpi_ns_build_external_path(node, required_size, buffer->pointer); 256 status =
257 acpi_ns_build_external_path(node, required_size, buffer->pointer);
258 if (ACPI_FAILURE(status)) {
259 return_ACPI_STATUS(status);
260 }
251 261
252 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n", 262 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%X]\n",
253 (char *)buffer->pointer, (u32) required_size)); 263 (char *)buffer->pointer, (u32) required_size));
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 89f3b2abfdc7..cf47805a7448 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -849,7 +849,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
849 if (irq < 0) 849 if (irq < 0)
850 continue; 850 continue;
851 851
852 if (irq >= ACPI_MAX_IRQS) 852 if (irq >= ARRAY_SIZE(acpi_irq_penalty))
853 continue; 853 continue;
854 854
855 if (used) 855 if (used)
@@ -872,10 +872,12 @@ static int __init acpi_irq_penalty_update(char *str, int used)
872 */ 872 */
873void acpi_penalize_isa_irq(int irq, int active) 873void acpi_penalize_isa_irq(int irq, int active)
874{ 874{
875 if (active) 875 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
876 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED; 876 if (active)
877 else 877 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
878 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; 878 else
879 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
880 }
879} 881}
880 882
881/* 883/*
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e36422a7122c..d3f0a62efcc1 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,7 +123,7 @@ struct acpi_processor_errata errata __read_mostly;
123static int set_no_mwait(const struct dmi_system_id *id) 123static int set_no_mwait(const struct dmi_system_id *id)
124{ 124{
125 printk(KERN_NOTICE PREFIX "%s detected - " 125 printk(KERN_NOTICE PREFIX "%s detected - "
126 "disable mwait for CPU C-stetes\n", id->ident); 126 "disabling mwait for CPU C-states\n", id->ident);
127 idle_nomwait = 1; 127 idle_nomwait = 1;
128 return 0; 128 return 0;
129} 129}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 283c08f5f4d4..cf5b1b7b684f 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -41,7 +41,6 @@
41#include <linux/pm_qos_params.h> 41#include <linux/pm_qos_params.h>
42#include <linux/clockchips.h> 42#include <linux/clockchips.h>
43#include <linux/cpuidle.h> 43#include <linux/cpuidle.h>
44#include <linux/cpuidle.h>
45 44
46/* 45/*
47 * Include the apic definitions for x86 to have the APIC timer related defines 46 * Include the apic definitions for x86 to have the APIC timer related defines
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0133af49cf06..80e32093e977 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(performance_mutex);
70 * 0 -> cpufreq low level drivers initialized -> consider _PPC values 70 * 0 -> cpufreq low level drivers initialized -> consider _PPC values
71 * 1 -> ignore _PPC totally -> forced by user through boot param 71 * 1 -> ignore _PPC totally -> forced by user through boot param
72 */ 72 */
73static unsigned int ignore_ppc = -1; 73static int ignore_ppc = -1;
74module_param(ignore_ppc, uint, 0644); 74module_param(ignore_ppc, uint, 0644);
75MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \ 75MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly" \
76 "limited by BIOS, this should help"); 76 "limited by BIOS, this should help");
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index f61ebc679e66..d9063ea414e3 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -587,6 +587,9 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
587 } else { 587 } else {
588 temp_size_needed += 588 temp_size_needed +=
589 acpi_ns_get_pathname_length((*sub_object_list)->reference.node); 589 acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
590 if (!temp_size_needed) {
591 return_ACPI_STATUS(AE_BAD_PARAMETER);
592 }
590 } 593 }
591 } else { 594 } else {
592 /* 595 /*
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index e7bf34a7b1d2..7dcb67e0b215 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -242,10 +242,12 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
242{ 242{
243 acpi_status status = AE_OK; 243 acpi_status status = AE_OK;
244 244
245 if (!required_length) { 245 /* Parameter validation */
246 WARN_ON(1); 246
247 return AE_ERROR; 247 if (!buffer || !required_length) {
248 return (AE_BAD_PARAMETER);
248 } 249 }
250
249 switch (buffer->length) { 251 switch (buffer->length) {
250 case ACPI_NO_BUFFER: 252 case ACPI_NO_BUFFER:
251 253
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index c5c791a575c9..42609d3a8aa9 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -135,6 +135,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
135 obj_pointer = object->package.elements; 135 obj_pointer = object->package.elements;
136 break; 136 break;
137 137
138 /*
139 * These objects have a possible list of notify handlers.
140 * Device object also may have a GPE block.
141 */
138 case ACPI_TYPE_DEVICE: 142 case ACPI_TYPE_DEVICE:
139 143
140 if (object->device.gpe_block) { 144 if (object->device.gpe_block) {
@@ -142,9 +146,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
142 gpe_block); 146 gpe_block);
143 } 147 }
144 148
145 /* Walk the handler list for this device */ 149 /*lint -fallthrough */
150
151 case ACPI_TYPE_PROCESSOR:
152 case ACPI_TYPE_THERMAL:
153
154 /* Walk the notify handler list for this object */
146 155
147 handler_desc = object->device.handler; 156 handler_desc = object->common_notify.handler;
148 while (handler_desc) { 157 while (handler_desc) {
149 next_desc = handler_desc->address_space.next; 158 next_desc = handler_desc->address_space.next;
150 acpi_ut_remove_reference(handler_desc); 159 acpi_ut_remove_reference(handler_desc);
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index e25484495e65..916eff399eb3 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -425,6 +425,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
425 acpi_size * obj_length) 425 acpi_size * obj_length)
426{ 426{
427 acpi_size length; 427 acpi_size length;
428 acpi_size size;
428 acpi_status status = AE_OK; 429 acpi_status status = AE_OK;
429 430
430 ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object); 431 ACPI_FUNCTION_TRACE_PTR(ut_get_simple_object_size, internal_object);
@@ -484,10 +485,14 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
484 * Get the actual length of the full pathname to this object. 485 * Get the actual length of the full pathname to this object.
485 * The reference will be converted to the pathname to the object 486 * The reference will be converted to the pathname to the object
486 */ 487 */
487 length += 488 size =
488 ACPI_ROUND_UP_TO_NATIVE_WORD 489 acpi_ns_get_pathname_length(internal_object->
489 (acpi_ns_get_pathname_length 490 reference.node);
490 (internal_object->reference.node)); 491 if (!size) {
492 return_ACPI_STATUS(AE_BAD_PARAMETER);
493 }
494
495 length += ACPI_ROUND_UP_TO_NATIVE_WORD(size);
491 break; 496 break;
492 497
493 default: 498 default:
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index c33b1c6e93b1..cfe2c833474d 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -347,7 +347,7 @@ struct acpi_buffer *out)
347 strcpy(method, "WQ"); 347 strcpy(method, "WQ");
348 strncat(method, block->object_id, 2); 348 strncat(method, block->object_id, 2);
349 349
350 status = acpi_evaluate_object(handle, method, NULL, out); 350 status = acpi_evaluate_object(handle, method, &input, out);
351 351
352 /* 352 /*
353 * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if 353 * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index d9d1b65d206c..74031de517e6 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -408,7 +408,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
408 ENSURE(get_last_session, CDC_MULTI_SESSION); 408 ENSURE(get_last_session, CDC_MULTI_SESSION);
409 ENSURE(get_mcn, CDC_MCN); 409 ENSURE(get_mcn, CDC_MCN);
410 ENSURE(reset, CDC_RESET); 410 ENSURE(reset, CDC_RESET);
411 ENSURE(audio_ioctl, CDC_PLAY_AUDIO);
412 ENSURE(generic_packet, CDC_GENERIC_PACKET); 411 ENSURE(generic_packet, CDC_GENERIC_PACKET);
413 cdi->mc_flags = 0; 412 cdi->mc_flags = 0;
414 cdo->n_minors = 0; 413 cdo->n_minors = 0;
@@ -2506,8 +2505,6 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
2506 2505
2507 /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ 2506 /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
2508 2507
2509 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2510 return -ENOSYS;
2511 if (copy_from_user(&q, argp, sizeof(q))) 2508 if (copy_from_user(&q, argp, sizeof(q)))
2512 return -EFAULT; 2509 return -EFAULT;
2513 2510
@@ -2538,8 +2535,6 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
2538 2535
2539 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ 2536 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
2540 2537
2541 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2542 return -ENOSYS;
2543 if (copy_from_user(&header, argp, sizeof(header))) 2538 if (copy_from_user(&header, argp, sizeof(header)))
2544 return -EFAULT; 2539 return -EFAULT;
2545 2540
@@ -2562,8 +2557,6 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
2562 2557
2563 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ 2558 /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
2564 2559
2565 if (!CDROM_CAN(CDC_PLAY_AUDIO))
2566 return -ENOSYS;
2567 if (copy_from_user(&entry, argp, sizeof(entry))) 2560 if (copy_from_user(&entry, argp, sizeof(entry)))
2568 return -EFAULT; 2561 return -EFAULT;
2569 2562
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 1e0455bd6df9..1231d95aa695 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -471,6 +471,12 @@ cleanup_sense_final:
471 return err; 471 return err;
472} 472}
473 473
474static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
475 void *arg)
476{
477 return -EINVAL;
478}
479
474static struct cdrom_device_ops gdrom_ops = { 480static struct cdrom_device_ops gdrom_ops = {
475 .open = gdrom_open, 481 .open = gdrom_open,
476 .release = gdrom_release, 482 .release = gdrom_release,
@@ -478,6 +484,7 @@ static struct cdrom_device_ops gdrom_ops = {
478 .media_changed = gdrom_mediachanged, 484 .media_changed = gdrom_mediachanged,
479 .get_last_session = gdrom_get_last_session, 485 .get_last_session = gdrom_get_last_session,
480 .reset = gdrom_hardreset, 486 .reset = gdrom_hardreset,
487 .audio_ioctl = gdrom_audio_ioctl,
481 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED | 488 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
482 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R, 489 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
483 .n_minors = 1, 490 .n_minors = 1,
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 9d0dfe6e0d63..031e0e1a1a3b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -550,12 +550,19 @@ return_complete:
550 } 550 }
551} 551}
552 552
553static int viocd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
554 void *arg)
555{
556 return -EINVAL;
557}
558
553static struct cdrom_device_ops viocd_dops = { 559static struct cdrom_device_ops viocd_dops = {
554 .open = viocd_open, 560 .open = viocd_open,
555 .release = viocd_release, 561 .release = viocd_release,
556 .media_changed = viocd_media_changed, 562 .media_changed = viocd_media_changed,
557 .lock_door = viocd_lock_door, 563 .lock_door = viocd_lock_door,
558 .generic_packet = viocd_packet, 564 .generic_packet = viocd_packet,
565 .audio_ioctl = viocd_audio_ioctl,
559 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM 566 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
560}; 567};
561 568
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 81e14bea54bd..4bada0e8b812 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -148,6 +148,9 @@ struct agp_bridge_data {
148 char minor_version; 148 char minor_version;
149 struct list_head list; 149 struct list_head list;
150 u32 apbase_config; 150 u32 apbase_config;
151 /* list of agp_memory mapped to the aperture */
152 struct list_head mapped_list;
153 spinlock_t mapped_lock;
151}; 154};
152 155
153#define KB(x) ((x) * 1024) 156#define KB(x) ((x) * 1024)
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 1ffb381130c3..31dcd9142d54 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -110,7 +110,8 @@ static int ali_configure(void)
110 110
111 nlvm_addr+= agp_bridge->gart_bus_addr; 111 nlvm_addr+= agp_bridge->gart_bus_addr;
112 nlvm_addr|=(agp_bridge->gart_bus_addr>>12); 112 nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
113 printk(KERN_INFO PFX "nlvm top &base = %8x\n",nlvm_addr); 113 dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
114 nlvm_addr);
114 } 115 }
115#endif 116#endif
116 117
@@ -315,8 +316,8 @@ static int __devinit agp_ali_probe(struct pci_dev *pdev,
315 goto found; 316 goto found;
316 } 317 }
317 318
318 printk(KERN_ERR PFX "Unsupported ALi chipset (device id: %04x)\n", 319 dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x])\n",
319 pdev->device); 320 pdev->vendor, pdev->device);
320 return -ENODEV; 321 return -ENODEV;
321 322
322 323
@@ -361,8 +362,7 @@ found:
361 bridge->driver = &ali_generic_bridge; 362 bridge->driver = &ali_generic_bridge;
362 } 363 }
363 364
364 printk(KERN_INFO PFX "Detected ALi %s chipset\n", 365 dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
365 devs[j].chipset_name);
366 366
367 /* Fill in the mode register */ 367 /* Fill in the mode register */
368 pci_read_config_dword(pdev, 368 pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 39a0718bc616..e280531843be 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -419,8 +419,8 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
419 return -ENODEV; 419 return -ENODEV;
420 420
421 j = ent - agp_amdk7_pci_table; 421 j = ent - agp_amdk7_pci_table;
422 printk(KERN_INFO PFX "Detected AMD %s chipset\n", 422 dev_info(&pdev->dev, "AMD %s chipset\n",
423 amd_agp_device_ids[j].chipset_name); 423 amd_agp_device_ids[j].chipset_name);
424 424
425 bridge = agp_alloc_bridge(); 425 bridge = agp_alloc_bridge();
426 if (!bridge) 426 if (!bridge)
@@ -442,7 +442,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
442 while (!cap_ptr) { 442 while (!cap_ptr) {
443 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard); 443 gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
444 if (!gfxcard) { 444 if (!gfxcard) {
445 printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n"); 445 dev_info(&pdev->dev, "no AGP VGA controller\n");
446 return -ENODEV; 446 return -ENODEV;
447 } 447 }
448 cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP); 448 cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
@@ -453,7 +453,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
453 (if necessary at all). */ 453 (if necessary at all). */
454 if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) { 454 if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
455 agp_bridge->flags |= AGP_ERRATA_1X; 455 agp_bridge->flags |= AGP_ERRATA_1X;
456 printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n"); 456 dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
457 } 457 }
458 pci_dev_put(gfxcard); 458 pci_dev_put(gfxcard);
459 } 459 }
@@ -469,7 +469,7 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
469 agp_bridge->flags = AGP_ERRATA_FASTWRITES; 469 agp_bridge->flags = AGP_ERRATA_FASTWRITES;
470 agp_bridge->flags |= AGP_ERRATA_SBA; 470 agp_bridge->flags |= AGP_ERRATA_SBA;
471 agp_bridge->flags |= AGP_ERRATA_1X; 471 agp_bridge->flags |= AGP_ERRATA_1X;
472 printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n"); 472 dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
473 } 473 }
474 } 474 }
475 475
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 481ffe87c716..7495c522d8e4 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -34,6 +34,7 @@
34 34
35static struct resource *aperture_resource; 35static struct resource *aperture_resource;
36static int __initdata agp_try_unsupported = 1; 36static int __initdata agp_try_unsupported = 1;
37static int agp_bridges_found;
37 38
38static void amd64_tlbflush(struct agp_memory *temp) 39static void amd64_tlbflush(struct agp_memory *temp)
39{ 40{
@@ -293,12 +294,13 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
293 * so let double check that order, and lets trust the AMD NB settings 294 * so let double check that order, and lets trust the AMD NB settings
294 */ 295 */
295 if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) { 296 if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
296 printk(KERN_INFO "Aperture size %u MB is not right, using settings from NB\n", 297 dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
297 32 << order); 298 32 << order);
298 order = nb_order; 299 order = nb_order;
299 } 300 }
300 301
301 printk(KERN_INFO PFX "Aperture from AGP @ %Lx size %u MB\n", aper, 32 << order); 302 dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
303 aper, 32 << order);
302 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) 304 if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
303 return -1; 305 return -1;
304 306
@@ -319,10 +321,10 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
319 for (i = 0; i < num_k8_northbridges; i++) { 321 for (i = 0; i < num_k8_northbridges; i++) {
320 struct pci_dev *dev = k8_northbridges[i]; 322 struct pci_dev *dev = k8_northbridges[i];
321 if (fix_northbridge(dev, pdev, cap_ptr) < 0) { 323 if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
322 printk(KERN_ERR PFX "No usable aperture found.\n"); 324 dev_err(&dev->dev, "no usable aperture found\n");
323#ifdef __x86_64__ 325#ifdef __x86_64__
324 /* should port this to i386 */ 326 /* should port this to i386 */
325 printk(KERN_ERR PFX "Consider rebooting with iommu=memaper=2 to get a good aperture.\n"); 327 dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
326#endif 328#endif
327 return -1; 329 return -1;
328 } 330 }
@@ -345,14 +347,14 @@ static void __devinit amd8151_init(struct pci_dev *pdev, struct agp_bridge_data
345 default: revstring="??"; break; 347 default: revstring="??"; break;
346 } 348 }
347 349
348 printk (KERN_INFO PFX "Detected AMD 8151 AGP Bridge rev %s\n", revstring); 350 dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
349 351
350 /* 352 /*
351 * Work around errata. 353 * Work around errata.
352 * Chips before B2 stepping incorrectly reporting v3.5 354 * Chips before B2 stepping incorrectly reporting v3.5
353 */ 355 */
354 if (pdev->revision < 0x13) { 356 if (pdev->revision < 0x13) {
355 printk (KERN_INFO PFX "Correcting AGP revision (reports 3.5, is really 3.0)\n"); 357 dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
356 bridge->major_version = 3; 358 bridge->major_version = 3;
357 bridge->minor_version = 0; 359 bridge->minor_version = 0;
358 } 360 }
@@ -375,11 +377,11 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
375 struct pci_dev *dev1; 377 struct pci_dev *dev1;
376 int i; 378 int i;
377 unsigned size = amd64_fetch_size(); 379 unsigned size = amd64_fetch_size();
378 printk(KERN_INFO "Setting up ULi AGP.\n"); 380
381 dev_info(&pdev->dev, "setting up ULi AGP\n");
379 dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0)); 382 dev1 = pci_get_slot (pdev->bus,PCI_DEVFN(0,0));
380 if (dev1 == NULL) { 383 if (dev1 == NULL) {
381 printk(KERN_INFO PFX "Detected a ULi chipset, " 384 dev_info(&pdev->dev, "can't find ULi secondary device\n");
382 "but could not fine the secondary device.\n");
383 return -ENODEV; 385 return -ENODEV;
384 } 386 }
385 387
@@ -388,7 +390,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
388 break; 390 break;
389 391
390 if (i == ARRAY_SIZE(uli_sizes)) { 392 if (i == ARRAY_SIZE(uli_sizes)) {
391 printk(KERN_INFO PFX "No ULi size found for %d\n", size); 393 dev_info(&pdev->dev, "no ULi size found for %d\n", size);
392 return -ENODEV; 394 return -ENODEV;
393 } 395 }
394 396
@@ -433,13 +435,11 @@ static int nforce3_agp_init(struct pci_dev *pdev)
433 int i; 435 int i;
434 unsigned size = amd64_fetch_size(); 436 unsigned size = amd64_fetch_size();
435 437
436 printk(KERN_INFO PFX "Setting up Nforce3 AGP.\n"); 438 dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
437 439
438 dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0)); 440 dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
439 if (dev1 == NULL) { 441 if (dev1 == NULL) {
440 printk(KERN_INFO PFX "agpgart: Detected an NVIDIA " 442 dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
441 "nForce3 chipset, but could not find "
442 "the secondary device.\n");
443 return -ENODEV; 443 return -ENODEV;
444 } 444 }
445 445
@@ -448,7 +448,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
448 break; 448 break;
449 449
450 if (i == ARRAY_SIZE(nforce3_sizes)) { 450 if (i == ARRAY_SIZE(nforce3_sizes)) {
451 printk(KERN_INFO PFX "No NForce3 size found for %d\n", size); 451 dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
452 return -ENODEV; 452 return -ENODEV;
453 } 453 }
454 454
@@ -462,7 +462,7 @@ static int nforce3_agp_init(struct pci_dev *pdev)
462 462
463 /* if x86-64 aperture base is beyond 4G, exit here */ 463 /* if x86-64 aperture base is beyond 4G, exit here */
464 if ( (apbase & 0x7fff) >> (32 - 25) ) { 464 if ( (apbase & 0x7fff) >> (32 - 25) ) {
465 printk(KERN_INFO PFX "aperture base > 4G\n"); 465 dev_info(&pdev->dev, "aperture base > 4G\n");
466 return -ENODEV; 466 return -ENODEV;
467 } 467 }
468 468
@@ -489,6 +489,7 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
489{ 489{
490 struct agp_bridge_data *bridge; 490 struct agp_bridge_data *bridge;
491 u8 cap_ptr; 491 u8 cap_ptr;
492 int err;
492 493
493 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); 494 cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
494 if (!cap_ptr) 495 if (!cap_ptr)
@@ -504,7 +505,8 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
504 pdev->device == PCI_DEVICE_ID_AMD_8151_0) { 505 pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
505 amd8151_init(pdev, bridge); 506 amd8151_init(pdev, bridge);
506 } else { 507 } else {
507 printk(KERN_INFO PFX "Detected AGP bridge %x\n", pdev->devfn); 508 dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
509 pdev->vendor, pdev->device);
508 } 510 }
509 511
510 bridge->driver = &amd_8151_driver; 512 bridge->driver = &amd_8151_driver;
@@ -536,7 +538,12 @@ static int __devinit agp_amd64_probe(struct pci_dev *pdev,
536 } 538 }
537 539
538 pci_set_drvdata(pdev, bridge); 540 pci_set_drvdata(pdev, bridge);
539 return agp_add_bridge(bridge); 541 err = agp_add_bridge(bridge);
542 if (err < 0)
543 return err;
544
545 agp_bridges_found++;
546 return 0;
540} 547}
541 548
542static void __devexit agp_amd64_remove(struct pci_dev *pdev) 549static void __devexit agp_amd64_remove(struct pci_dev *pdev)
@@ -713,7 +720,11 @@ int __init agp_amd64_init(void)
713 720
714 if (agp_off) 721 if (agp_off)
715 return -EINVAL; 722 return -EINVAL;
716 if (pci_register_driver(&agp_amd64_pci_driver) < 0) { 723 err = pci_register_driver(&agp_amd64_pci_driver);
724 if (err < 0)
725 return err;
726
727 if (agp_bridges_found == 0) {
717 struct pci_dev *dev; 728 struct pci_dev *dev;
718 if (!agp_try_unsupported && !agp_try_unsupported_boot) { 729 if (!agp_try_unsupported && !agp_try_unsupported_boot) {
719 printk(KERN_INFO PFX "No supported AGP bridge found.\n"); 730 printk(KERN_INFO PFX "No supported AGP bridge found.\n");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 3a4566c0d84f..6ecbcafb34b1 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -486,8 +486,8 @@ static int __devinit agp_ati_probe(struct pci_dev *pdev,
486 goto found; 486 goto found;
487 } 487 }
488 488
489 printk(KERN_ERR PFX 489 dev_err(&pdev->dev, "unsupported Ati chipset [%04x/%04x])\n",
490 "Unsupported Ati chipset (device id: %04x)\n", pdev->device); 490 pdev->vendor, pdev->device);
491 return -ENODEV; 491 return -ENODEV;
492 492
493found: 493found:
@@ -500,8 +500,7 @@ found:
500 500
501 bridge->driver = &ati_generic_bridge; 501 bridge->driver = &ati_generic_bridge;
502 502
503 printk(KERN_INFO PFX "Detected Ati %s chipset\n", 503 dev_info(&pdev->dev, "Ati %s chipset\n", devs[j].chipset_name);
504 devs[j].chipset_name);
505 504
506 /* Fill in the mode register */ 505 /* Fill in the mode register */
507 pci_read_config_dword(pdev, 506 pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 1ec87104e68c..3a3cc03d401c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -144,7 +144,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
144 void *addr = bridge->driver->agp_alloc_page(bridge); 144 void *addr = bridge->driver->agp_alloc_page(bridge);
145 145
146 if (!addr) { 146 if (!addr) {
147 printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); 147 dev_err(&bridge->dev->dev,
148 "can't get memory for scratch page\n");
148 return -ENOMEM; 149 return -ENOMEM;
149 } 150 }
150 151
@@ -155,13 +156,13 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
155 156
156 size_value = bridge->driver->fetch_size(); 157 size_value = bridge->driver->fetch_size();
157 if (size_value == 0) { 158 if (size_value == 0) {
158 printk(KERN_ERR PFX "unable to determine aperture size.\n"); 159 dev_err(&bridge->dev->dev, "can't determine aperture size\n");
159 rc = -EINVAL; 160 rc = -EINVAL;
160 goto err_out; 161 goto err_out;
161 } 162 }
162 if (bridge->driver->create_gatt_table(bridge)) { 163 if (bridge->driver->create_gatt_table(bridge)) {
163 printk(KERN_ERR PFX 164 dev_err(&bridge->dev->dev,
164 "unable to get memory for graphics translation table.\n"); 165 "can't get memory for graphics translation table\n");
165 rc = -ENOMEM; 166 rc = -ENOMEM;
166 goto err_out; 167 goto err_out;
167 } 168 }
@@ -169,7 +170,8 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
169 170
170 bridge->key_list = vmalloc(PAGE_SIZE * 4); 171 bridge->key_list = vmalloc(PAGE_SIZE * 4);
171 if (bridge->key_list == NULL) { 172 if (bridge->key_list == NULL) {
172 printk(KERN_ERR PFX "error allocating memory for key lists.\n"); 173 dev_err(&bridge->dev->dev,
174 "can't allocate memory for key lists\n");
173 rc = -ENOMEM; 175 rc = -ENOMEM;
174 goto err_out; 176 goto err_out;
175 } 177 }
@@ -179,10 +181,12 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
179 memset(bridge->key_list, 0, PAGE_SIZE * 4); 181 memset(bridge->key_list, 0, PAGE_SIZE * 4);
180 182
181 if (bridge->driver->configure()) { 183 if (bridge->driver->configure()) {
182 printk(KERN_ERR PFX "error configuring host chipset.\n"); 184 dev_err(&bridge->dev->dev, "error configuring host chipset\n");
183 rc = -EINVAL; 185 rc = -EINVAL;
184 goto err_out; 186 goto err_out;
185 } 187 }
188 INIT_LIST_HEAD(&bridge->mapped_list);
189 spin_lock_init(&bridge->mapped_lock);
186 190
187 return 0; 191 return 0;
188 192
@@ -269,25 +273,27 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
269 273
270 /* Grab reference on the chipset driver. */ 274 /* Grab reference on the chipset driver. */
271 if (!try_module_get(bridge->driver->owner)) { 275 if (!try_module_get(bridge->driver->owner)) {
272 printk (KERN_INFO PFX "Couldn't lock chipset driver.\n"); 276 dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
273 return -EINVAL; 277 return -EINVAL;
274 } 278 }
275 279
276 error = agp_backend_initialize(bridge); 280 error = agp_backend_initialize(bridge);
277 if (error) { 281 if (error) {
278 printk (KERN_INFO PFX "agp_backend_initialize() failed.\n"); 282 dev_info(&bridge->dev->dev,
283 "agp_backend_initialize() failed\n");
279 goto err_out; 284 goto err_out;
280 } 285 }
281 286
282 if (list_empty(&agp_bridges)) { 287 if (list_empty(&agp_bridges)) {
283 error = agp_frontend_initialize(); 288 error = agp_frontend_initialize();
284 if (error) { 289 if (error) {
285 printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n"); 290 dev_info(&bridge->dev->dev,
291 "agp_frontend_initialize() failed\n");
286 goto frontend_err; 292 goto frontend_err;
287 } 293 }
288 294
289 printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n", 295 dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
290 bridge->driver->fetch_size(), bridge->gart_bus_addr); 296 bridge->driver->fetch_size(), bridge->gart_bus_addr);
291 297
292 } 298 }
293 299
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index eaa1a355bb32..118dbde25dc7 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -429,6 +429,10 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
429 429
430 curr->is_bound = true; 430 curr->is_bound = true;
431 curr->pg_start = pg_start; 431 curr->pg_start = pg_start;
432 spin_lock(&agp_bridge->mapped_lock);
433 list_add(&curr->mapped_list, &agp_bridge->mapped_list);
434 spin_unlock(&agp_bridge->mapped_lock);
435
432 return 0; 436 return 0;
433} 437}
434EXPORT_SYMBOL(agp_bind_memory); 438EXPORT_SYMBOL(agp_bind_memory);
@@ -461,10 +465,34 @@ int agp_unbind_memory(struct agp_memory *curr)
461 465
462 curr->is_bound = false; 466 curr->is_bound = false;
463 curr->pg_start = 0; 467 curr->pg_start = 0;
468 spin_lock(&curr->bridge->mapped_lock);
469 list_del(&curr->mapped_list);
470 spin_unlock(&curr->bridge->mapped_lock);
464 return 0; 471 return 0;
465} 472}
466EXPORT_SYMBOL(agp_unbind_memory); 473EXPORT_SYMBOL(agp_unbind_memory);
467 474
475/**
476 * agp_rebind_emmory - Rewrite the entire GATT, useful on resume
477 */
478int agp_rebind_memory(void)
479{
480 struct agp_memory *curr;
481 int ret_val = 0;
482
483 spin_lock(&agp_bridge->mapped_lock);
484 list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
485 ret_val = curr->bridge->driver->insert_memory(curr,
486 curr->pg_start,
487 curr->type);
488 if (ret_val != 0)
489 break;
490 }
491 spin_unlock(&agp_bridge->mapped_lock);
492 return ret_val;
493}
494EXPORT_SYMBOL(agp_rebind_memory);
495
468/* End - Routines for handling swapping of agp_memory into the GATT */ 496/* End - Routines for handling swapping of agp_memory into the GATT */
469 497
470 498
@@ -771,8 +799,8 @@ void agp_device_command(u32 bridge_agpstat, bool agp_v3)
771 if (!agp) 799 if (!agp)
772 continue; 800 continue;
773 801
774 printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n", 802 dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
775 agp_v3 ? 3 : 2, pci_name(device), mode); 803 agp_v3 ? 3 : 2, mode);
776 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat); 804 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
777 } 805 }
778} 806}
@@ -800,10 +828,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
800 828
801 get_agp_version(agp_bridge); 829 get_agp_version(agp_bridge);
802 830
803 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", 831 dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
804 agp_bridge->major_version, 832 agp_bridge->major_version, agp_bridge->minor_version);
805 agp_bridge->minor_version,
806 pci_name(agp_bridge->dev));
807 833
808 pci_read_config_dword(agp_bridge->dev, 834 pci_read_config_dword(agp_bridge->dev,
809 agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat); 835 agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
@@ -832,8 +858,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
832 pci_write_config_dword(bridge->dev, 858 pci_write_config_dword(bridge->dev,
833 bridge->capndx+AGPCTRL, temp); 859 bridge->capndx+AGPCTRL, temp);
834 860
835 printk(KERN_INFO PFX "Device is in legacy mode," 861 dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
836 " falling back to 2.x\n");
837 } 862 }
838 } 863 }
839 864
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index df702642ab8f..016fdf0623a4 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -32,8 +32,8 @@
32#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 32#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
33#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 33#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
34#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 34#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
35#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40 35#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
36#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42 36#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
37#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 37#define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00
38#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02 38#define PCI_DEVICE_ID_INTEL_IGD_E_IG 0x2E02
39#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10 39#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
@@ -55,7 +55,7 @@
55 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \ 55 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
56 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \ 56 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
57 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \ 57 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
58 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB) 58 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB)
59 59
60#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \ 60#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
61 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \ 61 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -161,7 +161,7 @@ static int intel_i810_fetch_size(void)
161 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); 161 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
162 162
163 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { 163 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
164 printk(KERN_WARNING PFX "i810 is disabled\n"); 164 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
165 return 0; 165 return 0;
166 } 166 }
167 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { 167 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -193,7 +193,8 @@ static int intel_i810_configure(void)
193 193
194 intel_private.registers = ioremap(temp, 128 * 4096); 194 intel_private.registers = ioremap(temp, 128 * 4096);
195 if (!intel_private.registers) { 195 if (!intel_private.registers) {
196 printk(KERN_ERR PFX "Unable to remap memory.\n"); 196 dev_err(&intel_private.pcidev->dev,
197 "can't remap memory\n");
197 return -ENOMEM; 198 return -ENOMEM;
198 } 199 }
199 } 200 }
@@ -201,7 +202,8 @@ static int intel_i810_configure(void)
201 if ((readl(intel_private.registers+I810_DRAM_CTL) 202 if ((readl(intel_private.registers+I810_DRAM_CTL)
202 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { 203 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
203 /* This will need to be dynamically assigned */ 204 /* This will need to be dynamically assigned */
204 printk(KERN_INFO PFX "detected 4MB dedicated video ram.\n"); 205 dev_info(&intel_private.pcidev->dev,
206 "detected 4MB dedicated video ram\n");
205 intel_private.num_dcache_entries = 1024; 207 intel_private.num_dcache_entries = 1024;
206 } 208 }
207 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); 209 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
@@ -500,8 +502,8 @@ static void intel_i830_init_gtt_entries(void)
500 size = 1024 + 512; 502 size = 1024 + 512;
501 break; 503 break;
502 default: 504 default:
503 printk(KERN_INFO PFX "Unknown page table size, " 505 dev_info(&intel_private.pcidev->dev,
504 "assuming 512KB\n"); 506 "unknown page table size, assuming 512KB\n");
505 size = 512; 507 size = 512;
506 } 508 }
507 size += 4; /* add in BIOS popup space */ 509 size += 4; /* add in BIOS popup space */
@@ -515,8 +517,8 @@ static void intel_i830_init_gtt_entries(void)
515 size = 2048; 517 size = 2048;
516 break; 518 break;
517 default: 519 default:
518 printk(KERN_INFO PFX "Unknown page table size 0x%x, " 520 dev_info(&agp_bridge->dev->dev,
519 "assuming 512KB\n", 521 "unknown page table size 0x%x, assuming 512KB\n",
520 (gmch_ctrl & G33_PGETBL_SIZE_MASK)); 522 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
521 size = 512; 523 size = 512;
522 } 524 }
@@ -627,11 +629,11 @@ static void intel_i830_init_gtt_entries(void)
627 } 629 }
628 } 630 }
629 if (gtt_entries > 0) 631 if (gtt_entries > 0)
630 printk(KERN_INFO PFX "Detected %dK %s memory.\n", 632 dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
631 gtt_entries / KB(1), local ? "local" : "stolen"); 633 gtt_entries / KB(1), local ? "local" : "stolen");
632 else 634 else
633 printk(KERN_INFO PFX 635 dev_info(&agp_bridge->dev->dev,
634 "No pre-allocated video memory detected.\n"); 636 "no pre-allocated video memory detected\n");
635 gtt_entries /= KB(4); 637 gtt_entries /= KB(4);
636 638
637 intel_private.gtt_entries = gtt_entries; 639 intel_private.gtt_entries = gtt_entries;
@@ -801,10 +803,12 @@ static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
801 num_entries = A_SIZE_FIX(temp)->num_entries; 803 num_entries = A_SIZE_FIX(temp)->num_entries;
802 804
803 if (pg_start < intel_private.gtt_entries) { 805 if (pg_start < intel_private.gtt_entries) {
804 printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", 806 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
805 pg_start, intel_private.gtt_entries); 807 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
808 pg_start, intel_private.gtt_entries);
806 809
807 printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); 810 dev_info(&intel_private.pcidev->dev,
811 "trying to insert into local/stolen memory\n");
808 goto out_err; 812 goto out_err;
809 } 813 }
810 814
@@ -851,7 +855,8 @@ static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
851 return 0; 855 return 0;
852 856
853 if (pg_start < intel_private.gtt_entries) { 857 if (pg_start < intel_private.gtt_entries) {
854 printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); 858 dev_info(&intel_private.pcidev->dev,
859 "trying to disable local/stolen memory\n");
855 return -EINVAL; 860 return -EINVAL;
856 } 861 }
857 862
@@ -957,7 +962,7 @@ static void intel_i9xx_setup_flush(void)
957 if (intel_private.ifp_resource.start) { 962 if (intel_private.ifp_resource.start) {
958 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); 963 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
959 if (!intel_private.i9xx_flush_page) 964 if (!intel_private.i9xx_flush_page)
960 printk(KERN_INFO "unable to ioremap flush page - no chipset flushing"); 965 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
961 } 966 }
962} 967}
963 968
@@ -1028,10 +1033,12 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
1028 num_entries = A_SIZE_FIX(temp)->num_entries; 1033 num_entries = A_SIZE_FIX(temp)->num_entries;
1029 1034
1030 if (pg_start < intel_private.gtt_entries) { 1035 if (pg_start < intel_private.gtt_entries) {
1031 printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n", 1036 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1032 pg_start, intel_private.gtt_entries); 1037 "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
1038 pg_start, intel_private.gtt_entries);
1033 1039
1034 printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n"); 1040 dev_info(&intel_private.pcidev->dev,
1041 "trying to insert into local/stolen memory\n");
1035 goto out_err; 1042 goto out_err;
1036 } 1043 }
1037 1044
@@ -1078,7 +1085,8 @@ static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
1078 return 0; 1085 return 0;
1079 1086
1080 if (pg_start < intel_private.gtt_entries) { 1087 if (pg_start < intel_private.gtt_entries) {
1081 printk(KERN_INFO PFX "Trying to disable local/stolen memory\n"); 1088 dev_info(&intel_private.pcidev->dev,
1089 "trying to disable local/stolen memory\n");
1082 return -EINVAL; 1090 return -EINVAL;
1083 } 1091 }
1084 1092
@@ -1182,7 +1190,7 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
1182static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) 1190static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1183{ 1191{
1184 switch (agp_bridge->dev->device) { 1192 switch (agp_bridge->dev->device) {
1185 case PCI_DEVICE_ID_INTEL_IGD_HB: 1193 case PCI_DEVICE_ID_INTEL_GM45_HB:
1186 case PCI_DEVICE_ID_INTEL_IGD_E_HB: 1194 case PCI_DEVICE_ID_INTEL_IGD_E_HB:
1187 case PCI_DEVICE_ID_INTEL_Q45_HB: 1195 case PCI_DEVICE_ID_INTEL_Q45_HB:
1188 case PCI_DEVICE_ID_INTEL_G45_HB: 1196 case PCI_DEVICE_ID_INTEL_G45_HB:
@@ -1379,7 +1387,7 @@ static int intel_815_configure(void)
1379 /* the Intel 815 chipset spec. says that bits 29-31 in the 1387 /* the Intel 815 chipset spec. says that bits 29-31 in the
1380 * ATTBASE register are reserved -> try not to write them */ 1388 * ATTBASE register are reserved -> try not to write them */
1381 if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) { 1389 if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
1382 printk(KERN_EMERG PFX "gatt bus addr too high"); 1390 dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
1383 return -EINVAL; 1391 return -EINVAL;
1384 } 1392 }
1385 1393
@@ -2117,8 +2125,8 @@ static const struct intel_driver_description {
2117 NULL, &intel_g33_driver }, 2125 NULL, &intel_g33_driver },
2118 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", 2126 { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
2119 NULL, &intel_g33_driver }, 2127 NULL, &intel_g33_driver },
2120 { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0, 2128 { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
2121 "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, 2129 "Mobile Intel? GM45 Express", NULL, &intel_i965_driver },
2122 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0, 2130 { PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
2123 "Intel Integrated Graphics Device", NULL, &intel_i965_driver }, 2131 "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
2124 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0, 2132 { PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
@@ -2163,8 +2171,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2163 2171
2164 if (intel_agp_chipsets[i].name == NULL) { 2172 if (intel_agp_chipsets[i].name == NULL) {
2165 if (cap_ptr) 2173 if (cap_ptr)
2166 printk(KERN_WARNING PFX "Unsupported Intel chipset" 2174 dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
2167 "(device id: %04x)\n", pdev->device); 2175 pdev->vendor, pdev->device);
2168 agp_put_bridge(bridge); 2176 agp_put_bridge(bridge);
2169 return -ENODEV; 2177 return -ENODEV;
2170 } 2178 }
@@ -2172,9 +2180,8 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2172 if (bridge->driver == NULL) { 2180 if (bridge->driver == NULL) {
2173 /* bridge has no AGP and no IGD detected */ 2181 /* bridge has no AGP and no IGD detected */
2174 if (cap_ptr) 2182 if (cap_ptr)
2175 printk(KERN_WARNING PFX "Failed to find bridge device " 2183 dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
2176 "(chip_id: %04x)\n", 2184 intel_agp_chipsets[i].gmch_chip_id);
2177 intel_agp_chipsets[i].gmch_chip_id);
2178 agp_put_bridge(bridge); 2185 agp_put_bridge(bridge);
2179 return -ENODEV; 2186 return -ENODEV;
2180 } 2187 }
@@ -2183,8 +2190,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2183 bridge->capndx = cap_ptr; 2190 bridge->capndx = cap_ptr;
2184 bridge->dev_private_data = &intel_private; 2191 bridge->dev_private_data = &intel_private;
2185 2192
2186 printk(KERN_INFO PFX "Detected an Intel %s Chipset.\n", 2193 dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
2187 intel_agp_chipsets[i].name);
2188 2194
2189 /* 2195 /*
2190 * The following fixes the case where the BIOS has "forgotten" to 2196 * The following fixes the case where the BIOS has "forgotten" to
@@ -2194,7 +2200,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2194 r = &pdev->resource[0]; 2200 r = &pdev->resource[0];
2195 if (!r->start && r->end) { 2201 if (!r->start && r->end) {
2196 if (pci_assign_resource(pdev, 0)) { 2202 if (pci_assign_resource(pdev, 0)) {
2197 printk(KERN_ERR PFX "could not assign resource 0\n"); 2203 dev_err(&pdev->dev, "can't assign resource 0\n");
2198 agp_put_bridge(bridge); 2204 agp_put_bridge(bridge);
2199 return -ENODEV; 2205 return -ENODEV;
2200 } 2206 }
@@ -2206,7 +2212,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2206 * 20030610 - hamish@zot.org 2212 * 20030610 - hamish@zot.org
2207 */ 2213 */
2208 if (pci_enable_device(pdev)) { 2214 if (pci_enable_device(pdev)) {
2209 printk(KERN_ERR PFX "Unable to Enable PCI device\n"); 2215 dev_err(&pdev->dev, "can't enable PCI device\n");
2210 agp_put_bridge(bridge); 2216 agp_put_bridge(bridge);
2211 return -ENODEV; 2217 return -ENODEV;
2212 } 2218 }
@@ -2238,6 +2244,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
2238static int agp_intel_resume(struct pci_dev *pdev) 2244static int agp_intel_resume(struct pci_dev *pdev)
2239{ 2245{
2240 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 2246 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
2247 int ret_val;
2241 2248
2242 pci_restore_state(pdev); 2249 pci_restore_state(pdev);
2243 2250
@@ -2265,6 +2272,10 @@ static int agp_intel_resume(struct pci_dev *pdev)
2265 else if (bridge->driver == &intel_i965_driver) 2272 else if (bridge->driver == &intel_i965_driver)
2266 intel_i915_configure(); 2273 intel_i915_configure();
2267 2274
2275 ret_val = agp_rebind_memory();
2276 if (ret_val != 0)
2277 return ret_val;
2278
2268 return 0; 2279 return 0;
2269} 2280}
2270#endif 2281#endif
@@ -2315,7 +2326,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
2315 ID(PCI_DEVICE_ID_INTEL_G33_HB), 2326 ID(PCI_DEVICE_ID_INTEL_G33_HB),
2316 ID(PCI_DEVICE_ID_INTEL_Q35_HB), 2327 ID(PCI_DEVICE_ID_INTEL_Q35_HB),
2317 ID(PCI_DEVICE_ID_INTEL_Q33_HB), 2328 ID(PCI_DEVICE_ID_INTEL_Q33_HB),
2318 ID(PCI_DEVICE_ID_INTEL_IGD_HB), 2329 ID(PCI_DEVICE_ID_INTEL_GM45_HB),
2319 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB), 2330 ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
2320 ID(PCI_DEVICE_ID_INTEL_Q45_HB), 2331 ID(PCI_DEVICE_ID_INTEL_Q45_HB),
2321 ID(PCI_DEVICE_ID_INTEL_G45_HB), 2332 ID(PCI_DEVICE_ID_INTEL_G45_HB),
diff --git a/drivers/char/agp/isoch.c b/drivers/char/agp/isoch.c
index 3f9ccde62377..c73385cc4b8a 100644
--- a/drivers/char/agp/isoch.c
+++ b/drivers/char/agp/isoch.c
@@ -153,7 +153,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
153 153
154 /* Check if this configuration has any chance of working */ 154 /* Check if this configuration has any chance of working */
155 if (tot_bw > target.maxbw) { 155 if (tot_bw > target.maxbw) {
156 printk(KERN_ERR PFX "isochronous bandwidth required " 156 dev_err(&td->dev, "isochronous bandwidth required "
157 "by AGP 3.0 devices exceeds that which is supported by " 157 "by AGP 3.0 devices exceeds that which is supported by "
158 "the AGP 3.0 bridge!\n"); 158 "the AGP 3.0 bridge!\n");
159 ret = -ENODEV; 159 ret = -ENODEV;
@@ -188,7 +188,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
188 /* Exit if the minimal ISOCH_N allocation among the masters is more 188 /* Exit if the minimal ISOCH_N allocation among the masters is more
189 * than the target can handle. */ 189 * than the target can handle. */
190 if (tot_n > target.n) { 190 if (tot_n > target.n) {
191 printk(KERN_ERR PFX "number of isochronous " 191 dev_err(&td->dev, "number of isochronous "
192 "transactions per period required by AGP 3.0 devices " 192 "transactions per period required by AGP 3.0 devices "
193 "exceeds that which is supported by the AGP 3.0 " 193 "exceeds that which is supported by the AGP 3.0 "
194 "bridge!\n"); 194 "bridge!\n");
@@ -229,7 +229,7 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
229 /* Exit if the minimal RQ needs of the masters exceeds what the target 229 /* Exit if the minimal RQ needs of the masters exceeds what the target
230 * can provide. */ 230 * can provide. */
231 if (tot_rq > rq_isoch) { 231 if (tot_rq > rq_isoch) {
232 printk(KERN_ERR PFX "number of request queue slots " 232 dev_err(&td->dev, "number of request queue slots "
233 "required by the isochronous bandwidth requested by " 233 "required by the isochronous bandwidth requested by "
234 "AGP 3.0 devices exceeds the number provided by the " 234 "AGP 3.0 devices exceeds the number provided by the "
235 "AGP 3.0 bridge!\n"); 235 "AGP 3.0 bridge!\n");
@@ -359,8 +359,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
359 case 0x0001: /* Unclassified device */ 359 case 0x0001: /* Unclassified device */
360 /* Don't know what this is, but log it for investigation. */ 360 /* Don't know what this is, but log it for investigation. */
361 if (mcapndx != 0) { 361 if (mcapndx != 0) {
362 printk (KERN_INFO PFX "Wacky, found unclassified AGP device. %x:%x\n", 362 dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
363 dev->vendor, dev->device); 363 pci_name(dev),
364 dev->vendor, dev->device);
364 } 365 }
365 continue; 366 continue;
366 367
@@ -407,17 +408,18 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
407 } 408 }
408 409
409 if (mcapndx == 0) { 410 if (mcapndx == 0) {
410 printk(KERN_ERR PFX "woah! Non-AGP device " 411 dev_err(&td->dev, "woah! Non-AGP device %s on "
411 "found on the secondary bus of an AGP 3.5 bridge!\n"); 412 "secondary bus of AGP 3.5 bridge!\n",
413 pci_name(dev));
412 ret = -ENODEV; 414 ret = -ENODEV;
413 goto free_and_exit; 415 goto free_and_exit;
414 } 416 }
415 417
416 mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf; 418 mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
417 if (mmajor < 3) { 419 if (mmajor < 3) {
418 printk(KERN_ERR PFX "woah! AGP 2.0 device " 420 dev_err(&td->dev, "woah! AGP 2.0 device %s on "
419 "found on the secondary bus of an AGP 3.5 " 421 "secondary bus of AGP 3.5 bridge operating "
420 "bridge operating with AGP 3.0 electricals!\n"); 422 "with AGP 3.0 electricals!\n", pci_name(dev));
421 ret = -ENODEV; 423 ret = -ENODEV;
422 goto free_and_exit; 424 goto free_and_exit;
423 } 425 }
@@ -427,10 +429,10 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
427 pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus); 429 pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
428 430
429 if (((mstatus >> 3) & 0x1) == 0) { 431 if (((mstatus >> 3) & 0x1) == 0) {
430 printk(KERN_ERR PFX "woah! AGP 3.x device " 432 dev_err(&td->dev, "woah! AGP 3.x device %s not "
431 "not operating in AGP 3.x mode found on the " 433 "operating in AGP 3.x mode on secondary bus "
432 "secondary bus of an AGP 3.5 bridge operating " 434 "of AGP 3.5 bridge operating with AGP 3.0 "
433 "with AGP 3.0 electricals!\n"); 435 "electricals!\n", pci_name(dev));
434 ret = -ENODEV; 436 ret = -ENODEV;
435 goto free_and_exit; 437 goto free_and_exit;
436 } 438 }
@@ -444,9 +446,9 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
444 if (isoch) { 446 if (isoch) {
445 ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs); 447 ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
446 if (ret) { 448 if (ret) {
447 printk(KERN_INFO PFX "Something bad happened setting " 449 dev_info(&td->dev, "something bad happened setting "
448 "up isochronous xfers. Falling back to " 450 "up isochronous xfers; falling back to "
449 "non-isochronous xfer mode.\n"); 451 "non-isochronous xfer mode\n");
450 } else { 452 } else {
451 goto free_and_exit; 453 goto free_and_exit;
452 } 454 }
@@ -466,4 +468,3 @@ free_and_exit:
466get_out: 468get_out:
467 return ret; 469 return ret;
468} 470}
469
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index b6791846809f..2587ef96a960 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -79,10 +79,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
79 u32 command; 79 u32 command;
80 int rate; 80 int rate;
81 81
82 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n", 82 dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
83 agp_bridge->major_version, 83 agp_bridge->major_version, agp_bridge->minor_version);
84 agp_bridge->minor_version,
85 pci_name(agp_bridge->dev));
86 84
87 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command); 85 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
88 command = agp_collect_device_status(bridge, mode, command); 86 command = agp_collect_device_status(bridge, mode, command);
@@ -94,8 +92,8 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
94 if (!agp) 92 if (!agp)
95 continue; 93 continue;
96 94
97 printk(KERN_INFO PFX "Putting AGP V3 device at %s into %dx mode\n", 95 dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
98 pci_name(device), rate); 96 pci_name(device), rate);
99 97
100 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); 98 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
101 99
@@ -105,7 +103,7 @@ static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
105 * cannot be configured 103 * cannot be configured
106 */ 104 */
107 if (device->device == bridge->dev->device) { 105 if (device->device == bridge->dev->device) {
108 printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n"); 106 dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
109 msleep(10); 107 msleep(10);
110 } 108 }
111 } 109 }
@@ -190,7 +188,8 @@ static int __devinit agp_sis_probe(struct pci_dev *pdev,
190 return -ENODEV; 188 return -ENODEV;
191 189
192 190
193 printk(KERN_INFO PFX "Detected SiS chipset - id:%i\n", pdev->device); 191 dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
192 pdev->vendor, pdev->device);
194 bridge = agp_alloc_bridge(); 193 bridge = agp_alloc_bridge();
195 if (!bridge) 194 if (!bridge)
196 return -ENOMEM; 195 return -ENOMEM;
@@ -242,7 +241,7 @@ static struct pci_device_id agp_sis_pci_table[] = {
242 .class = (PCI_CLASS_BRIDGE_HOST << 8), 241 .class = (PCI_CLASS_BRIDGE_HOST << 8),
243 .class_mask = ~0, 242 .class_mask = ~0,
244 .vendor = PCI_VENDOR_ID_SI, 243 .vendor = PCI_VENDOR_ID_SI,
245 .device = PCI_DEVICE_ID_SI_5591_AGP, 244 .device = PCI_DEVICE_ID_SI_5591,
246 .subvendor = PCI_ANY_ID, 245 .subvendor = PCI_ANY_ID,
247 .subdevice = PCI_ANY_ID, 246 .subdevice = PCI_ANY_ID,
248 }, 247 },
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 0e054c134490..2fb27fe4c10c 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -241,7 +241,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
241 while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) { 241 while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
242 cpu_relax(); 242 cpu_relax();
243 if (time_after(jiffies, timeout)) { 243 if (time_after(jiffies, timeout)) {
244 printk(KERN_ERR PFX "TLB post flush took more than 3 seconds\n"); 244 dev_err(&serverworks_private.svrwrks_dev->dev,
245 "TLB post flush took more than 3 seconds\n");
245 break; 246 break;
246 } 247 }
247 } 248 }
@@ -251,7 +252,8 @@ static void serverworks_tlbflush(struct agp_memory *temp)
251 while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) { 252 while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
252 cpu_relax(); 253 cpu_relax();
253 if (time_after(jiffies, timeout)) { 254 if (time_after(jiffies, timeout)) {
254 printk(KERN_ERR PFX "TLB Dir flush took more than 3 seconds\n"); 255 dev_err(&serverworks_private.svrwrks_dev->dev,
256 "TLB Dir flush took more than 3 seconds\n");
255 break; 257 break;
256 } 258 }
257 } 259 }
@@ -271,7 +273,7 @@ static int serverworks_configure(void)
271 temp = (temp & PCI_BASE_ADDRESS_MEM_MASK); 273 temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
272 serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096); 274 serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
273 if (!serverworks_private.registers) { 275 if (!serverworks_private.registers) {
274 printk (KERN_ERR PFX "Unable to ioremap() memory.\n"); 276 dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
275 return -ENOMEM; 277 return -ENOMEM;
276 } 278 }
277 279
@@ -451,7 +453,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
451 453
452 switch (pdev->device) { 454 switch (pdev->device) {
453 case 0x0006: 455 case 0x0006:
454 printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n"); 456 dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
455 return -ENODEV; 457 return -ENODEV;
456 458
457 case PCI_DEVICE_ID_SERVERWORKS_HE: 459 case PCI_DEVICE_ID_SERVERWORKS_HE:
@@ -461,8 +463,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
461 463
462 default: 464 default:
463 if (cap_ptr) 465 if (cap_ptr)
464 printk(KERN_ERR PFX "Unsupported Serverworks chipset " 466 dev_err(&pdev->dev, "unsupported Serverworks chipset "
465 "(device id: %04x)\n", pdev->device); 467 "[%04x/%04x]\n", pdev->vendor, pdev->device);
466 return -ENODEV; 468 return -ENODEV;
467 } 469 }
468 470
@@ -470,8 +472,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
470 bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number, 472 bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
471 PCI_DEVFN(0, 1)); 473 PCI_DEVFN(0, 1));
472 if (!bridge_dev) { 474 if (!bridge_dev) {
473 printk(KERN_INFO PFX "Detected a Serverworks chipset " 475 dev_info(&pdev->dev, "can't find secondary device\n");
474 "but could not find the secondary device.\n");
475 return -ENODEV; 476 return -ENODEV;
476 } 477 }
477 478
@@ -482,8 +483,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
482 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) { 483 if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
483 pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2); 484 pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
484 if (temp2 != 0) { 485 if (temp2 != 0) {
485 printk(KERN_INFO PFX "Detected 64 bit aperture address, " 486 dev_info(&pdev->dev, "64 bit aperture address, "
486 "but top bits are not zero. Disabling agp\n"); 487 "but top bits are not zero; disabling AGP\n");
487 return -ENODEV; 488 return -ENODEV;
488 } 489 }
489 serverworks_private.mm_addr_ofs = 0x18; 490 serverworks_private.mm_addr_ofs = 0x18;
@@ -495,8 +496,8 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
495 pci_read_config_dword(pdev, 496 pci_read_config_dword(pdev,
496 serverworks_private.mm_addr_ofs + 4, &temp2); 497 serverworks_private.mm_addr_ofs + 4, &temp2);
497 if (temp2 != 0) { 498 if (temp2 != 0) {
498 printk(KERN_INFO PFX "Detected 64 bit MMIO address, " 499 dev_info(&pdev->dev, "64 bit MMIO address, but top "
499 "but top bits are not zero. Disabling agp\n"); 500 "bits are not zero; disabling AGP\n");
500 return -ENODEV; 501 return -ENODEV;
501 } 502 }
502 } 503 }
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index d2fa3cfca02a..eef72709ec53 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -46,8 +46,8 @@ static int uninorth_fetch_size(void)
46 break; 46 break;
47 47
48 if (i == agp_bridge->driver->num_aperture_sizes) { 48 if (i == agp_bridge->driver->num_aperture_sizes) {
49 printk(KERN_ERR PFX "Invalid aperture size, using" 49 dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
50 " default\n"); 50 "using default\n");
51 size = 0; 51 size = 0;
52 aperture = NULL; 52 aperture = NULL;
53 } 53 }
@@ -108,8 +108,8 @@ static int uninorth_configure(void)
108 108
109 current_size = A_SIZE_32(agp_bridge->current_size); 109 current_size = A_SIZE_32(agp_bridge->current_size);
110 110
111 printk(KERN_INFO PFX "configuring for size idx: %d\n", 111 dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
112 current_size->size_value); 112 current_size->size_value);
113 113
114 /* aperture size and gatt addr */ 114 /* aperture size and gatt addr */
115 pci_write_config_dword(agp_bridge->dev, 115 pci_write_config_dword(agp_bridge->dev,
@@ -197,8 +197,9 @@ static int u3_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
197 gp = (u32 *) &agp_bridge->gatt_table[pg_start]; 197 gp = (u32 *) &agp_bridge->gatt_table[pg_start];
198 for (i = 0; i < mem->page_count; ++i) { 198 for (i = 0; i < mem->page_count; ++i) {
199 if (gp[i]) { 199 if (gp[i]) {
200 printk("u3_insert_memory: entry 0x%x occupied (%x)\n", 200 dev_info(&agp_bridge->dev->dev,
201 i, gp[i]); 201 "u3_insert_memory: entry 0x%x occupied (%x)\n",
202 i, gp[i]);
202 return -EBUSY; 203 return -EBUSY;
203 } 204 }
204 } 205 }
@@ -276,8 +277,8 @@ static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
276 &scratch); 277 &scratch);
277 } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000); 278 } while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
278 if ((scratch & PCI_AGP_COMMAND_AGP) == 0) 279 if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
279 printk(KERN_ERR PFX "failed to write UniNorth AGP" 280 dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
280 " command register\n"); 281 "command register\n");
281 282
282 if (uninorth_rev >= 0x30) { 283 if (uninorth_rev >= 0x30) {
283 /* This is an AGP V3 */ 284 /* This is an AGP V3 */
@@ -330,8 +331,8 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
330 pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd); 331 pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
331 if (!(cmd & PCI_AGP_COMMAND_AGP)) 332 if (!(cmd & PCI_AGP_COMMAND_AGP))
332 continue; 333 continue;
333 printk("uninorth-agp: disabling AGP on device %s\n", 334 dev_info(&pdev->dev, "disabling AGP on device %s\n",
334 pci_name(device)); 335 pci_name(device));
335 cmd &= ~PCI_AGP_COMMAND_AGP; 336 cmd &= ~PCI_AGP_COMMAND_AGP;
336 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd); 337 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
337 } 338 }
@@ -341,8 +342,7 @@ static int agp_uninorth_suspend(struct pci_dev *pdev)
341 pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd); 342 pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
342 bridge->dev_private_data = (void *)(long)cmd; 343 bridge->dev_private_data = (void *)(long)cmd;
343 if (cmd & PCI_AGP_COMMAND_AGP) { 344 if (cmd & PCI_AGP_COMMAND_AGP) {
344 printk("uninorth-agp: disabling AGP on bridge %s\n", 345 dev_info(&pdev->dev, "disabling AGP on bridge\n");
345 pci_name(pdev));
346 cmd &= ~PCI_AGP_COMMAND_AGP; 346 cmd &= ~PCI_AGP_COMMAND_AGP;
347 pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd); 347 pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
348 } 348 }
@@ -591,14 +591,14 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
591 /* probe for known chipsets */ 591 /* probe for known chipsets */
592 for (j = 0; devs[j].chipset_name != NULL; ++j) { 592 for (j = 0; devs[j].chipset_name != NULL; ++j) {
593 if (pdev->device == devs[j].device_id) { 593 if (pdev->device == devs[j].device_id) {
594 printk(KERN_INFO PFX "Detected Apple %s chipset\n", 594 dev_info(&pdev->dev, "Apple %s chipset\n",
595 devs[j].chipset_name); 595 devs[j].chipset_name);
596 goto found; 596 goto found;
597 } 597 }
598 } 598 }
599 599
600 printk(KERN_ERR PFX "Unsupported Apple chipset (device id: %04x).\n", 600 dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
601 pdev->device); 601 pdev->vendor, pdev->device);
602 return -ENODEV; 602 return -ENODEV;
603 603
604 found: 604 found:
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 02aac104842d..fd64137b1ab9 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -322,11 +322,10 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
322 322
323 hp->tty = tty; 323 hp->tty = tty;
324 324
325 if (hp->ops->notifier_add)
326 rc = hp->ops->notifier_add(hp, hp->data);
327
328 spin_unlock_irqrestore(&hp->lock, flags); 325 spin_unlock_irqrestore(&hp->lock, flags);
329 326
327 if (hp->ops->notifier_add)
328 rc = hp->ops->notifier_add(hp, hp->data);
330 329
331 /* 330 /*
332 * If the notifier fails we return an error. The tty layer 331 * If the notifier fails we return an error. The tty layer
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4ebb5e..128202e18fc9 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -31,6 +31,7 @@
31#include <asm/io.h> 31#include <asm/io.h>
32#include <asm/msr.h> 32#include <asm/msr.h>
33#include <asm/cpufeature.h> 33#include <asm/cpufeature.h>
34#include <asm/i387.h>
34 35
35 36
36#define PFX KBUILD_MODNAME ": " 37#define PFX KBUILD_MODNAME ": "
@@ -67,16 +68,23 @@ enum {
67 * Another possible performance boost may come from simply buffering 68 * Another possible performance boost may come from simply buffering
68 * until we have 4 bytes, thus returning a u32 at a time, 69 * until we have 4 bytes, thus returning a u32 at a time,
69 * instead of the current u8-at-a-time. 70 * instead of the current u8-at-a-time.
71 *
72 * Padlock instructions can generate a spurious DNA fault, so
73 * we have to call them in the context of irq_ts_save/restore()
70 */ 74 */
71 75
72static inline u32 xstore(u32 *addr, u32 edx_in) 76static inline u32 xstore(u32 *addr, u32 edx_in)
73{ 77{
74 u32 eax_out; 78 u32 eax_out;
79 int ts_state;
80
81 ts_state = irq_ts_save();
75 82
76 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */" 83 asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
77 :"=m"(*addr), "=a"(eax_out) 84 :"=m"(*addr), "=a"(eax_out)
78 :"D"(addr), "d"(edx_in)); 85 :"D"(addr), "d"(edx_in));
79 86
87 irq_ts_restore(ts_state);
80 return eax_out; 88 return eax_out;
81} 89}
82 90
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
index b1414507997c..3a23e7694d55 100644
--- a/drivers/char/pcmcia/ipwireless/tty.c
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -29,7 +29,6 @@
29#include <linux/tty_driver.h> 29#include <linux/tty_driver.h>
30#include <linux/tty_flip.h> 30#include <linux/tty_flip.h>
31#include <linux/uaccess.h> 31#include <linux/uaccess.h>
32#include <linux/version.h>
33 32
34#include "tty.h" 33#include "tty.h"
35#include "network.h" 34#include "network.h"
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index d9799e2bcfbf..f53d4d00faf0 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,7 +78,6 @@
78#include <linux/wait.h> 78#include <linux/wait.h>
79#include <linux/bcd.h> 79#include <linux/bcd.h>
80#include <linux/delay.h> 80#include <linux/delay.h>
81#include <linux/smp_lock.h>
82#include <linux/uaccess.h> 81#include <linux/uaccess.h>
83 82
84#include <asm/current.h> 83#include <asm/current.h>
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 509c89ac5bd3..08911ed66494 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -47,7 +47,6 @@
47 47
48 48
49#include <linux/module.h> 49#include <linux/module.h>
50#include <linux/version.h>
51#include <linux/errno.h> 50#include <linux/errno.h>
52#include <linux/signal.h> 51#include <linux/signal.h>
53#include <linux/sched.h> 52#include <linux/sched.h>
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index e1b46bc7e43c..a27160ba21d7 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1161,8 +1161,8 @@ void disassociate_ctty(int on_exit)
1161 tty = get_current_tty(); 1161 tty = get_current_tty();
1162 if (tty) { 1162 if (tty) {
1163 tty_pgrp = get_pid(tty->pgrp); 1163 tty_pgrp = get_pid(tty->pgrp);
1164 mutex_unlock(&tty_mutex);
1165 lock_kernel(); 1164 lock_kernel();
1165 mutex_unlock(&tty_mutex);
1166 /* XXX: here we race, there is nothing protecting tty */ 1166 /* XXX: here we race, there is nothing protecting tty */
1167 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) 1167 if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
1168 tty_vhangup(tty); 1168 tty_vhangup(tty);
@@ -2496,45 +2496,25 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
2496} 2496}
2497 2497
2498/** 2498/**
2499 * tiocswinsz - implement window size set ioctl 2499 * tty_do_resize - resize event
2500 * @tty; tty 2500 * @tty: tty being resized
2501 * @arg: user buffer for result 2501 * @real_tty: real tty (if using a pty/tty pair)
2502 * @rows: rows (character)
2503 * @cols: cols (character)
2502 * 2504 *
2503 * Copies the user idea of the window size to the kernel. Traditionally 2505 * Update the termios variables and send the neccessary signals to
2504 * this is just advisory information but for the Linux console it 2506 * peform a terminal resize correctly
2505 * actually has driver level meaning and triggers a VC resize.
2506 *
2507 * Locking:
2508 * Called function use the console_sem is used to ensure we do
2509 * not try and resize the console twice at once.
2510 * The tty->termios_mutex is used to ensure we don't double
2511 * resize and get confused. Lock order - tty->termios_mutex before
2512 * console sem
2513 */ 2507 */
2514 2508
2515static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, 2509int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
2516 struct winsize __user *arg) 2510 struct winsize *ws)
2517{ 2511{
2518 struct winsize tmp_ws;
2519 struct pid *pgrp, *rpgrp; 2512 struct pid *pgrp, *rpgrp;
2520 unsigned long flags; 2513 unsigned long flags;
2521 2514
2522 if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
2523 return -EFAULT;
2524
2525 mutex_lock(&tty->termios_mutex); 2515 mutex_lock(&tty->termios_mutex);
2526 if (!memcmp(&tmp_ws, &tty->winsize, sizeof(*arg))) 2516 if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
2527 goto done; 2517 goto done;
2528
2529#ifdef CONFIG_VT
2530 if (tty->driver->type == TTY_DRIVER_TYPE_CONSOLE) {
2531 if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col,
2532 tmp_ws.ws_row)) {
2533 mutex_unlock(&tty->termios_mutex);
2534 return -ENXIO;
2535 }
2536 }
2537#endif
2538 /* Get the PID values and reference them so we can 2518 /* Get the PID values and reference them so we can
2539 avoid holding the tty ctrl lock while sending signals */ 2519 avoid holding the tty ctrl lock while sending signals */
2540 spin_lock_irqsave(&tty->ctrl_lock, flags); 2520 spin_lock_irqsave(&tty->ctrl_lock, flags);
@@ -2550,14 +2530,42 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2550 put_pid(pgrp); 2530 put_pid(pgrp);
2551 put_pid(rpgrp); 2531 put_pid(rpgrp);
2552 2532
2553 tty->winsize = tmp_ws; 2533 tty->winsize = *ws;
2554 real_tty->winsize = tmp_ws; 2534 real_tty->winsize = *ws;
2555done: 2535done:
2556 mutex_unlock(&tty->termios_mutex); 2536 mutex_unlock(&tty->termios_mutex);
2557 return 0; 2537 return 0;
2558} 2538}
2559 2539
2560/** 2540/**
2541 * tiocswinsz - implement window size set ioctl
2542 * @tty; tty
2543 * @arg: user buffer for result
2544 *
2545 * Copies the user idea of the window size to the kernel. Traditionally
2546 * this is just advisory information but for the Linux console it
2547 * actually has driver level meaning and triggers a VC resize.
2548 *
2549 * Locking:
2550 * Driver dependant. The default do_resize method takes the
2551 * tty termios mutex and ctrl_lock. The console takes its own lock
2552 * then calls into the default method.
2553 */
2554
2555static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
2556 struct winsize __user *arg)
2557{
2558 struct winsize tmp_ws;
2559 if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
2560 return -EFAULT;
2561
2562 if (tty->ops->resize)
2563 return tty->ops->resize(tty, real_tty, &tmp_ws);
2564 else
2565 return tty_do_resize(tty, real_tty, &tmp_ws);
2566}
2567
2568/**
2561 * tioccons - allow admin to move logical console 2569 * tioccons - allow admin to move logical console
2562 * @file: the file to become console 2570 * @file: the file to become console
2563 * 2571 *
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 1bc00c9d860d..60359c360912 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -803,7 +803,25 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
803 */ 803 */
804#define VC_RESIZE_MAXCOL (32767) 804#define VC_RESIZE_MAXCOL (32767)
805#define VC_RESIZE_MAXROW (32767) 805#define VC_RESIZE_MAXROW (32767)
806int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) 806
807/**
808 * vc_do_resize - resizing method for the tty
809 * @tty: tty being resized
810 * @real_tty: real tty (different to tty if a pty/tty pair)
811 * @vc: virtual console private data
812 * @cols: columns
813 * @lines: lines
814 *
815 * Resize a virtual console, clipping according to the actual constraints.
816 * If the caller passes a tty structure then update the termios winsize
817 * information and perform any neccessary signal handling.
818 *
819 * Caller must hold the console semaphore. Takes the termios mutex and
820 * ctrl_lock of the tty IFF a tty is passed.
821 */
822
823static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty,
824 struct vc_data *vc, unsigned int cols, unsigned int lines)
807{ 825{
808 unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; 826 unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
809 unsigned int old_cols, old_rows, old_row_size, old_screen_size; 827 unsigned int old_cols, old_rows, old_row_size, old_screen_size;
@@ -907,24 +925,15 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
907 gotoxy(vc, vc->vc_x, vc->vc_y); 925 gotoxy(vc, vc->vc_x, vc->vc_y);
908 save_cur(vc); 926 save_cur(vc);
909 927
910 if (vc->vc_tty) { 928 if (tty) {
911 struct winsize ws, *cws = &vc->vc_tty->winsize; 929 /* Rewrite the requested winsize data with the actual
912 struct pid *pgrp = NULL; 930 resulting sizes */
913 931 struct winsize ws;
914 memset(&ws, 0, sizeof(ws)); 932 memset(&ws, 0, sizeof(ws));
915 ws.ws_row = vc->vc_rows; 933 ws.ws_row = vc->vc_rows;
916 ws.ws_col = vc->vc_cols; 934 ws.ws_col = vc->vc_cols;
917 ws.ws_ypixel = vc->vc_scan_lines; 935 ws.ws_ypixel = vc->vc_scan_lines;
918 936 tty_do_resize(tty, real_tty, &ws);
919 spin_lock_irq(&vc->vc_tty->ctrl_lock);
920 if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col))
921 pgrp = get_pid(vc->vc_tty->pgrp);
922 spin_unlock_irq(&vc->vc_tty->ctrl_lock);
923 if (pgrp) {
924 kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1);
925 put_pid(pgrp);
926 }
927 *cws = ws;
928 } 937 }
929 938
930 if (CON_IS_VISIBLE(vc)) 939 if (CON_IS_VISIBLE(vc))
@@ -932,14 +941,47 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
932 return err; 941 return err;
933} 942}
934 943
935int vc_lock_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) 944/**
945 * vc_resize - resize a VT
946 * @vc: virtual console
947 * @cols: columns
948 * @rows: rows
949 *
950 * Resize a virtual console as seen from the console end of things. We
951 * use the common vc_do_resize methods to update the structures. The
952 * caller must hold the console sem to protect console internals and
953 * vc->vc_tty
954 */
955
956int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
957{
958 return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows);
959}
960
961/**
962 * vt_resize - resize a VT
963 * @tty: tty to resize
964 * @real_tty: tty if a pty/tty pair
965 * @ws: winsize attributes
966 *
967 * Resize a virtual terminal. This is called by the tty layer as we
968 * register our own handler for resizing. The mutual helper does all
969 * the actual work.
970 *
971 * Takes the console sem and the called methods then take the tty
972 * termios_mutex and the tty ctrl_lock in that order.
973 */
974
975int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty,
976 struct winsize *ws)
936{ 977{
937 int rc; 978 struct vc_data *vc = tty->driver_data;
979 int ret;
938 980
939 acquire_console_sem(); 981 acquire_console_sem();
940 rc = vc_resize(vc, cols, lines); 982 ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row);
941 release_console_sem(); 983 release_console_sem();
942 return rc; 984 return ret;
943} 985}
944 986
945void vc_deallocate(unsigned int currcons) 987void vc_deallocate(unsigned int currcons)
@@ -2907,6 +2949,7 @@ static const struct tty_operations con_ops = {
2907 .start = con_start, 2949 .start = con_start,
2908 .throttle = con_throttle, 2950 .throttle = con_throttle,
2909 .unthrottle = con_unthrottle, 2951 .unthrottle = con_unthrottle,
2952 .resize = vt_resize,
2910}; 2953};
2911 2954
2912int __init vty_init(void) 2955int __init vty_init(void)
@@ -4061,7 +4104,6 @@ EXPORT_SYMBOL(default_blu);
4061EXPORT_SYMBOL(update_region); 4104EXPORT_SYMBOL(update_region);
4062EXPORT_SYMBOL(redraw_screen); 4105EXPORT_SYMBOL(redraw_screen);
4063EXPORT_SYMBOL(vc_resize); 4106EXPORT_SYMBOL(vc_resize);
4064EXPORT_SYMBOL(vc_lock_resize);
4065EXPORT_SYMBOL(fg_console); 4107EXPORT_SYMBOL(fg_console);
4066EXPORT_SYMBOL(console_blank_hook); 4108EXPORT_SYMBOL(console_blank_hook);
4067EXPORT_SYMBOL(console_blanked); 4109EXPORT_SYMBOL(console_blanked);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 3211afd9d57e..c904e9ad4a71 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -947,14 +947,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
947 get_user(cc, &vtsizes->v_cols)) 947 get_user(cc, &vtsizes->v_cols))
948 ret = -EFAULT; 948 ret = -EFAULT;
949 else { 949 else {
950 acquire_console_sem();
950 for (i = 0; i < MAX_NR_CONSOLES; i++) { 951 for (i = 0; i < MAX_NR_CONSOLES; i++) {
951 vc = vc_cons[i].d; 952 vc = vc_cons[i].d;
952 953
953 if (vc) { 954 if (vc) {
954 vc->vc_resize_user = 1; 955 vc->vc_resize_user = 1;
955 vc_lock_resize(vc_cons[i].d, cc, ll); 956 vc_resize(vc_cons[i].d, cc, ll);
956 } 957 }
957 } 958 }
959 release_console_sem();
958 } 960 }
959 break; 961 break;
960 } 962 }
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 8bfee5fb7223..278c9857bcf5 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -74,7 +74,6 @@
74 * currently programmed in the FPGA. 74 * currently programmed in the FPGA.
75 */ 75 */
76 76
77#include <linux/version.h>
78#include <linux/module.h> 77#include <linux/module.h>
79#include <linux/kernel.h> 78#include <linux/kernel.h>
80#include <linux/types.h> 79#include <linux/types.h>
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ba7b9a6b17a1..a4bec3f919aa 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices); 67 struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
68 struct ladder_device_state *last_state; 68 struct ladder_device_state *last_state;
69 int last_residency, last_idx = ldev->last_state_idx; 69 int last_residency, last_idx = ldev->last_state_idx;
70 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
70 71
71 if (unlikely(!ldev)) 72 if (unlikely(!ldev))
72 return 0; 73 return 0;
73 74
75 /* Special case when user has set very strict latency requirement */
76 if (unlikely(latency_req == 0)) {
77 ladder_do_selection(ldev, last_idx, 0);
78 return 0;
79 }
80
74 last_state = &ldev->states[last_idx]; 81 last_state = &ldev->states[last_idx];
75 82
76 if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) 83 if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
81 /* consider promotion */ 88 /* consider promotion */
82 if (last_idx < dev->state_count - 1 && 89 if (last_idx < dev->state_count - 1 &&
83 last_residency > last_state->threshold.promotion_time && 90 last_residency > last_state->threshold.promotion_time &&
84 dev->states[last_idx + 1].exit_latency <= 91 dev->states[last_idx + 1].exit_latency <= latency_req) {
85 pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
86 last_state->stats.promotion_count++; 92 last_state->stats.promotion_count++;
87 last_state->stats.demotion_count = 0; 93 last_state->stats.demotion_count = 0;
88 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { 94 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
92 } 98 }
93 99
94 /* consider demotion */ 100 /* consider demotion */
95 if (last_idx > 0 && 101 if (last_idx > CPUIDLE_DRIVER_STATE_START &&
102 dev->states[last_idx].exit_latency > latency_req) {
103 int i;
104
105 for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
106 if (dev->states[i].exit_latency <= latency_req)
107 break;
108 }
109 ladder_do_selection(ldev, last_idx, i);
110 return i;
111 }
112
113 if (last_idx > CPUIDLE_DRIVER_STATE_START &&
96 last_residency < last_state->threshold.demotion_time) { 114 last_residency < last_state->threshold.demotion_time) {
97 last_state->stats.demotion_count++; 115 last_state->stats.demotion_count++;
98 last_state->stats.promotion_count = 0; 116 last_state->stats.promotion_count = 0;
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
117 struct ladder_device_state *lstate; 135 struct ladder_device_state *lstate;
118 struct cpuidle_state *state; 136 struct cpuidle_state *state;
119 137
120 ldev->last_state_idx = 0; 138 ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
121 139
122 for (i = 0; i < dev->state_count; i++) { 140 for (i = 0; i < dev->state_count; i++) {
123 state = &dev->states[i]; 141 state = &dev->states[i];
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35c..8d7cf3f31450 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
34static int menu_select(struct cpuidle_device *dev) 34static int menu_select(struct cpuidle_device *dev)
35{ 35{
36 struct menu_device *data = &__get_cpu_var(menu_devices); 36 struct menu_device *data = &__get_cpu_var(menu_devices);
37 int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
37 int i; 38 int i;
38 39
40 /* Special case when user has set very strict latency requirement */
41 if (unlikely(latency_req == 0)) {
42 data->last_state_idx = 0;
43 return 0;
44 }
45
39 /* determine the expected residency time */ 46 /* determine the expected residency time */
40 data->expected_us = 47 data->expected_us =
41 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000; 48 (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
42 49
43 /* find the deepest idle state that satisfies our constraints */ 50 /* find the deepest idle state that satisfies our constraints */
44 for (i = 1; i < dev->state_count; i++) { 51 for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
45 struct cpuidle_state *s = &dev->states[i]; 52 struct cpuidle_state *s = &dev->states[i];
46 53
47 if (s->target_residency > data->expected_us) 54 if (s->target_residency > data->expected_us)
48 break; 55 break;
49 if (s->target_residency > data->predicted_us) 56 if (s->target_residency > data->predicted_us)
50 break; 57 break;
51 if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) 58 if (s->exit_latency > latency_req)
52 break; 59 break;
53 } 60 }
54 61
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
67{ 74{
68 struct menu_device *data = &__get_cpu_var(menu_devices); 75 struct menu_device *data = &__get_cpu_var(menu_devices);
69 int last_idx = data->last_state_idx; 76 int last_idx = data->last_state_idx;
70 unsigned int measured_us = 77 unsigned int last_idle_us = cpuidle_get_last_residency(dev);
71 cpuidle_get_last_residency(dev) + data->elapsed_us;
72 struct cpuidle_state *target = &dev->states[last_idx]; 78 struct cpuidle_state *target = &dev->states[last_idx];
79 unsigned int measured_us;
73 80
74 /* 81 /*
75 * Ugh, this idle state doesn't support residency measurements, so we 82 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
77 * for one full standard timer tick. However, be aware that this 84 * for one full standard timer tick. However, be aware that this
78 * could potentially result in a suboptimal state transition. 85 * could potentially result in a suboptimal state transition.
79 */ 86 */
80 if (!(target->flags & CPUIDLE_FLAG_TIME_VALID)) 87 if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
81 measured_us = USEC_PER_SEC / HZ; 88 last_idle_us = USEC_PER_SEC / HZ;
89
90 /*
91 * measured_us and elapsed_us are the cumulative idle time, since the
92 * last time we were woken out of idle by an interrupt.
93 */
94 if (data->elapsed_us <= data->elapsed_us + last_idle_us)
95 measured_us = data->elapsed_us + last_idle_us;
96 else
97 measured_us = -1;
98
99 /* Predict time until next break event */
100 data->predicted_us = max(measured_us, data->last_measured_us);
82 101
83 /* Predict time remaining until next break event */ 102 if (last_idle_us + BREAK_FUZZ <
84 if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) { 103 data->expected_us - target->exit_latency) {
85 data->predicted_us = max(measured_us, data->last_measured_us);
86 data->last_measured_us = measured_us; 104 data->last_measured_us = measured_us;
87 data->elapsed_us = 0; 105 data->elapsed_us = 0;
88 } else { 106 } else {
89 if (data->elapsed_us < data->elapsed_us + measured_us) 107 data->elapsed_us = measured_us;
90 data->elapsed_us = measured_us;
91 else
92 data->elapsed_us = -1;
93 data->predicted_us = max(measured_us, data->last_measured_us);
94 } 108 }
95} 109}
96 110
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 31a0e0b455b6..97b003839fb6 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -21,8 +21,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
21} 21}
22__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup); 22__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
23 23
24static ssize_t show_available_governors(struct sys_device *dev, 24static ssize_t show_available_governors(struct sysdev_class *class,
25 struct sysdev_attribute *attr, char *buf) 25 char *buf)
26{ 26{
27 ssize_t i = 0; 27 ssize_t i = 0;
28 struct cpuidle_governor *tmp; 28 struct cpuidle_governor *tmp;
@@ -40,8 +40,8 @@ out:
40 return i; 40 return i;
41} 41}
42 42
43static ssize_t show_current_driver(struct sys_device *dev, 43static ssize_t show_current_driver(struct sysdev_class *class,
44 struct sysdev_attribute *attr, char *buf) 44 char *buf)
45{ 45{
46 ssize_t ret; 46 ssize_t ret;
47 47
@@ -55,8 +55,8 @@ static ssize_t show_current_driver(struct sys_device *dev,
55 return ret; 55 return ret;
56} 56}
57 57
58static ssize_t show_current_governor(struct sys_device *dev, 58static ssize_t show_current_governor(struct sysdev_class *class,
59 struct sysdev_attribute *attr, char *buf) 59 char *buf)
60{ 60{
61 ssize_t ret; 61 ssize_t ret;
62 62
@@ -70,9 +70,8 @@ static ssize_t show_current_governor(struct sys_device *dev,
70 return ret; 70 return ret;
71} 71}
72 72
73static ssize_t store_current_governor(struct sys_device *dev, 73static ssize_t store_current_governor(struct sysdev_class *class,
74 struct sysdev_attribute *attr, 74 const char *buf, size_t count)
75 const char *buf, size_t count)
76{ 75{
77 char gov_name[CPUIDLE_NAME_LEN]; 76 char gov_name[CPUIDLE_NAME_LEN];
78 int ret = -EINVAL; 77 int ret = -EINVAL;
@@ -104,8 +103,9 @@ static ssize_t store_current_governor(struct sys_device *dev,
104 return count; 103 return count;
105} 104}
106 105
107static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL); 106static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
108static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL); 107static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
108 NULL);
109 109
110static struct attribute *cpuclass_default_attrs[] = { 110static struct attribute *cpuclass_default_attrs[] = {
111 &attr_current_driver.attr, 111 &attr_current_driver.attr,
@@ -113,9 +113,10 @@ static struct attribute *cpuclass_default_attrs[] = {
113 NULL 113 NULL
114}; 114};
115 115
116static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL); 116static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
117static SYSDEV_ATTR(current_governor, 0644, show_current_governor, 117 NULL);
118 store_current_governor); 118static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
119 store_current_governor);
119 120
120static struct attribute *cpuclass_switch_attrs[] = { 121static struct attribute *cpuclass_switch_attrs[] = {
121 &attr_available_governors.attr, 122 &attr_available_governors.attr,
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 54a2a166e566..bf2917d197a0 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -16,6 +16,7 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <asm/byteorder.h> 18#include <asm/byteorder.h>
19#include <asm/i387.h>
19#include "padlock.h" 20#include "padlock.h"
20 21
21/* Control word. */ 22/* Control word. */
@@ -141,6 +142,12 @@ static inline void padlock_reset_key(void)
141 asm volatile ("pushfl; popfl"); 142 asm volatile ("pushfl; popfl");
142} 143}
143 144
145/*
146 * While the padlock instructions don't use FP/SSE registers, they
147 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
148 * should be used only inside the irq_ts_save/restore() context
149 */
150
144static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, 151static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
145 void *control_word) 152 void *control_word)
146{ 153{
@@ -205,15 +212,23 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
205static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 212static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
206{ 213{
207 struct aes_ctx *ctx = aes_ctx(tfm); 214 struct aes_ctx *ctx = aes_ctx(tfm);
215 int ts_state;
208 padlock_reset_key(); 216 padlock_reset_key();
217
218 ts_state = irq_ts_save();
209 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); 219 aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
220 irq_ts_restore(ts_state);
210} 221}
211 222
212static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) 223static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
213{ 224{
214 struct aes_ctx *ctx = aes_ctx(tfm); 225 struct aes_ctx *ctx = aes_ctx(tfm);
226 int ts_state;
215 padlock_reset_key(); 227 padlock_reset_key();
228
229 ts_state = irq_ts_save();
216 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); 230 aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
231 irq_ts_restore(ts_state);
217} 232}
218 233
219static struct crypto_alg aes_alg = { 234static struct crypto_alg aes_alg = {
@@ -244,12 +259,14 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
244 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 259 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
245 struct blkcipher_walk walk; 260 struct blkcipher_walk walk;
246 int err; 261 int err;
262 int ts_state;
247 263
248 padlock_reset_key(); 264 padlock_reset_key();
249 265
250 blkcipher_walk_init(&walk, dst, src, nbytes); 266 blkcipher_walk_init(&walk, dst, src, nbytes);
251 err = blkcipher_walk_virt(desc, &walk); 267 err = blkcipher_walk_virt(desc, &walk);
252 268
269 ts_state = irq_ts_save();
253 while ((nbytes = walk.nbytes)) { 270 while ((nbytes = walk.nbytes)) {
254 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 271 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
255 ctx->E, &ctx->cword.encrypt, 272 ctx->E, &ctx->cword.encrypt,
@@ -257,6 +274,7 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
257 nbytes &= AES_BLOCK_SIZE - 1; 274 nbytes &= AES_BLOCK_SIZE - 1;
258 err = blkcipher_walk_done(desc, &walk, nbytes); 275 err = blkcipher_walk_done(desc, &walk, nbytes);
259 } 276 }
277 irq_ts_restore(ts_state);
260 278
261 return err; 279 return err;
262} 280}
@@ -268,12 +286,14 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
268 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 286 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
269 struct blkcipher_walk walk; 287 struct blkcipher_walk walk;
270 int err; 288 int err;
289 int ts_state;
271 290
272 padlock_reset_key(); 291 padlock_reset_key();
273 292
274 blkcipher_walk_init(&walk, dst, src, nbytes); 293 blkcipher_walk_init(&walk, dst, src, nbytes);
275 err = blkcipher_walk_virt(desc, &walk); 294 err = blkcipher_walk_virt(desc, &walk);
276 295
296 ts_state = irq_ts_save();
277 while ((nbytes = walk.nbytes)) { 297 while ((nbytes = walk.nbytes)) {
278 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, 298 padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
279 ctx->D, &ctx->cword.decrypt, 299 ctx->D, &ctx->cword.decrypt,
@@ -281,7 +301,7 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
281 nbytes &= AES_BLOCK_SIZE - 1; 301 nbytes &= AES_BLOCK_SIZE - 1;
282 err = blkcipher_walk_done(desc, &walk, nbytes); 302 err = blkcipher_walk_done(desc, &walk, nbytes);
283 } 303 }
284 304 irq_ts_restore(ts_state);
285 return err; 305 return err;
286} 306}
287 307
@@ -314,12 +334,14 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
314 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 334 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
315 struct blkcipher_walk walk; 335 struct blkcipher_walk walk;
316 int err; 336 int err;
337 int ts_state;
317 338
318 padlock_reset_key(); 339 padlock_reset_key();
319 340
320 blkcipher_walk_init(&walk, dst, src, nbytes); 341 blkcipher_walk_init(&walk, dst, src, nbytes);
321 err = blkcipher_walk_virt(desc, &walk); 342 err = blkcipher_walk_virt(desc, &walk);
322 343
344 ts_state = irq_ts_save();
323 while ((nbytes = walk.nbytes)) { 345 while ((nbytes = walk.nbytes)) {
324 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, 346 u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
325 walk.dst.virt.addr, ctx->E, 347 walk.dst.virt.addr, ctx->E,
@@ -329,6 +351,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
329 nbytes &= AES_BLOCK_SIZE - 1; 351 nbytes &= AES_BLOCK_SIZE - 1;
330 err = blkcipher_walk_done(desc, &walk, nbytes); 352 err = blkcipher_walk_done(desc, &walk, nbytes);
331 } 353 }
354 irq_ts_restore(ts_state);
332 355
333 return err; 356 return err;
334} 357}
@@ -340,12 +363,14 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
340 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); 363 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
341 struct blkcipher_walk walk; 364 struct blkcipher_walk walk;
342 int err; 365 int err;
366 int ts_state;
343 367
344 padlock_reset_key(); 368 padlock_reset_key();
345 369
346 blkcipher_walk_init(&walk, dst, src, nbytes); 370 blkcipher_walk_init(&walk, dst, src, nbytes);
347 err = blkcipher_walk_virt(desc, &walk); 371 err = blkcipher_walk_virt(desc, &walk);
348 372
373 ts_state = irq_ts_save();
349 while ((nbytes = walk.nbytes)) { 374 while ((nbytes = walk.nbytes)) {
350 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, 375 padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
351 ctx->D, walk.iv, &ctx->cword.decrypt, 376 ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -354,6 +379,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
354 err = blkcipher_walk_done(desc, &walk, nbytes); 379 err = blkcipher_walk_done(desc, &walk, nbytes);
355 } 380 }
356 381
382 irq_ts_restore(ts_state);
357 return err; 383 return err;
358} 384}
359 385
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 40d5680fa013..a7fbadebf623 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <asm/i387.h>
25#include "padlock.h" 26#include "padlock.h"
26 27
27#define SHA1_DEFAULT_FALLBACK "sha1-generic" 28#define SHA1_DEFAULT_FALLBACK "sha1-generic"
@@ -102,6 +103,7 @@ static void padlock_do_sha1(const char *in, char *out, int count)
102 * PadLock microcode needs it that big. */ 103 * PadLock microcode needs it that big. */
103 char buf[128+16]; 104 char buf[128+16];
104 char *result = NEAREST_ALIGNED(buf); 105 char *result = NEAREST_ALIGNED(buf);
106 int ts_state;
105 107
106 ((uint32_t *)result)[0] = SHA1_H0; 108 ((uint32_t *)result)[0] = SHA1_H0;
107 ((uint32_t *)result)[1] = SHA1_H1; 109 ((uint32_t *)result)[1] = SHA1_H1;
@@ -109,9 +111,12 @@ static void padlock_do_sha1(const char *in, char *out, int count)
109 ((uint32_t *)result)[3] = SHA1_H3; 111 ((uint32_t *)result)[3] = SHA1_H3;
110 ((uint32_t *)result)[4] = SHA1_H4; 112 ((uint32_t *)result)[4] = SHA1_H4;
111 113
114 /* prevent taking the spurious DNA fault with padlock. */
115 ts_state = irq_ts_save();
112 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ 116 asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
113 : "+S"(in), "+D"(result) 117 : "+S"(in), "+D"(result)
114 : "c"(count), "a"(0)); 118 : "c"(count), "a"(0));
119 irq_ts_restore(ts_state);
115 120
116 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); 121 padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
117} 122}
@@ -123,6 +128,7 @@ static void padlock_do_sha256(const char *in, char *out, int count)
123 * PadLock microcode needs it that big. */ 128 * PadLock microcode needs it that big. */
124 char buf[128+16]; 129 char buf[128+16];
125 char *result = NEAREST_ALIGNED(buf); 130 char *result = NEAREST_ALIGNED(buf);
131 int ts_state;
126 132
127 ((uint32_t *)result)[0] = SHA256_H0; 133 ((uint32_t *)result)[0] = SHA256_H0;
128 ((uint32_t *)result)[1] = SHA256_H1; 134 ((uint32_t *)result)[1] = SHA256_H1;
@@ -133,9 +139,12 @@ static void padlock_do_sha256(const char *in, char *out, int count)
133 ((uint32_t *)result)[6] = SHA256_H6; 139 ((uint32_t *)result)[6] = SHA256_H6;
134 ((uint32_t *)result)[7] = SHA256_H7; 140 ((uint32_t *)result)[7] = SHA256_H7;
135 141
142 /* prevent taking the spurious DNA fault with padlock. */
143 ts_state = irq_ts_save();
136 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ 144 asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
137 : "+S"(in), "+D"(result) 145 : "+S"(in), "+D"(result)
138 : "c"(count), "a"(0)); 146 : "c"(count), "a"(0));
147 irq_ts_restore(ts_state);
139 148
140 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); 149 padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
141} 150}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 681c15f42083..ee827a7f7c6a 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -96,6 +96,9 @@ struct talitos_private {
96 unsigned int exec_units; 96 unsigned int exec_units;
97 unsigned int desc_types; 97 unsigned int desc_types;
98 98
99 /* SEC Compatibility info */
100 unsigned long features;
101
99 /* next channel to be assigned next incoming descriptor */ 102 /* next channel to be assigned next incoming descriptor */
100 atomic_t last_chan; 103 atomic_t last_chan;
101 104
@@ -133,6 +136,9 @@ struct talitos_private {
133 struct hwrng rng; 136 struct hwrng rng;
134}; 137};
135 138
139/* .features flag */
140#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
141
136/* 142/*
137 * map virtual single (contiguous) pointer to h/w descriptor pointer 143 * map virtual single (contiguous) pointer to h/w descriptor pointer
138 */ 144 */
@@ -785,7 +791,7 @@ static void ipsec_esp_encrypt_done(struct device *dev,
785 /* copy the generated ICV to dst */ 791 /* copy the generated ICV to dst */
786 if (edesc->dma_len) { 792 if (edesc->dma_len) {
787 icvdata = &edesc->link_tbl[edesc->src_nents + 793 icvdata = &edesc->link_tbl[edesc->src_nents +
788 edesc->dst_nents + 1]; 794 edesc->dst_nents + 2];
789 sg = sg_last(areq->dst, edesc->dst_nents); 795 sg = sg_last(areq->dst, edesc->dst_nents);
790 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize, 796 memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
791 icvdata, ctx->authsize); 797 icvdata, ctx->authsize);
@@ -814,7 +820,7 @@ static void ipsec_esp_decrypt_done(struct device *dev,
814 /* auth check */ 820 /* auth check */
815 if (edesc->dma_len) 821 if (edesc->dma_len)
816 icvdata = &edesc->link_tbl[edesc->src_nents + 822 icvdata = &edesc->link_tbl[edesc->src_nents +
817 edesc->dst_nents + 1]; 823 edesc->dst_nents + 2];
818 else 824 else
819 icvdata = &edesc->link_tbl[0]; 825 icvdata = &edesc->link_tbl[0];
820 826
@@ -921,10 +927,30 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
921 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen, 927 sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
922 &edesc->link_tbl[0]); 928 &edesc->link_tbl[0]);
923 if (sg_count > 1) { 929 if (sg_count > 1) {
930 struct talitos_ptr *link_tbl_ptr =
931 &edesc->link_tbl[sg_count-1];
932 struct scatterlist *sg;
933 struct talitos_private *priv = dev_get_drvdata(dev);
934
924 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP; 935 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
925 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl); 936 desc->ptr[4].ptr = cpu_to_be32(edesc->dma_link_tbl);
926 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 937 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
927 edesc->dma_len, DMA_BIDIRECTIONAL); 938 edesc->dma_len, DMA_BIDIRECTIONAL);
939 /* If necessary for this SEC revision,
940 * add a link table entry for ICV.
941 */
942 if ((priv->features &
943 TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT) &&
944 (edesc->desc.hdr & DESC_HDR_MODE0_ENCRYPT) == 0) {
945 link_tbl_ptr->j_extent = 0;
946 link_tbl_ptr++;
947 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
948 link_tbl_ptr->len = cpu_to_be16(authsize);
949 sg = sg_last(areq->src, edesc->src_nents ? : 1);
950 link_tbl_ptr->ptr = cpu_to_be32(
951 (char *)sg_dma_address(sg)
952 + sg->length - authsize);
953 }
928 } else { 954 } else {
929 /* Only one segment now, so no link tbl needed */ 955 /* Only one segment now, so no link tbl needed */
930 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src)); 956 desc->ptr[4].ptr = cpu_to_be32(sg_dma_address(areq->src));
@@ -944,12 +970,11 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
944 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst)); 970 desc->ptr[5].ptr = cpu_to_be32(sg_dma_address(areq->dst));
945 } else { 971 } else {
946 struct talitos_ptr *link_tbl_ptr = 972 struct talitos_ptr *link_tbl_ptr =
947 &edesc->link_tbl[edesc->src_nents]; 973 &edesc->link_tbl[edesc->src_nents + 1];
948 struct scatterlist *sg;
949 974
950 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *) 975 desc->ptr[5].ptr = cpu_to_be32((struct talitos_ptr *)
951 edesc->dma_link_tbl + 976 edesc->dma_link_tbl +
952 edesc->src_nents); 977 edesc->src_nents + 1);
953 if (areq->src == areq->dst) { 978 if (areq->src == areq->dst) {
954 memcpy(link_tbl_ptr, &edesc->link_tbl[0], 979 memcpy(link_tbl_ptr, &edesc->link_tbl[0],
955 edesc->src_nents * sizeof(struct talitos_ptr)); 980 edesc->src_nents * sizeof(struct talitos_ptr));
@@ -957,14 +982,10 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
957 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen, 982 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
958 link_tbl_ptr); 983 link_tbl_ptr);
959 } 984 }
985 /* Add an entry to the link table for ICV data */
960 link_tbl_ptr += sg_count - 1; 986 link_tbl_ptr += sg_count - 1;
961
962 /* handle case where sg_last contains the ICV exclusively */
963 sg = sg_last(areq->dst, edesc->dst_nents);
964 if (sg->length == ctx->authsize)
965 link_tbl_ptr--;
966
967 link_tbl_ptr->j_extent = 0; 987 link_tbl_ptr->j_extent = 0;
988 sg_count++;
968 link_tbl_ptr++; 989 link_tbl_ptr++;
969 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN; 990 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
970 link_tbl_ptr->len = cpu_to_be16(authsize); 991 link_tbl_ptr->len = cpu_to_be16(authsize);
@@ -973,7 +994,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
973 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *) 994 link_tbl_ptr->ptr = cpu_to_be32((struct talitos_ptr *)
974 edesc->dma_link_tbl + 995 edesc->dma_link_tbl +
975 edesc->src_nents + 996 edesc->src_nents +
976 edesc->dst_nents + 1); 997 edesc->dst_nents + 2);
977 998
978 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP; 999 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
979 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl, 1000 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
@@ -1040,12 +1061,12 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1040 1061
1041 /* 1062 /*
1042 * allocate space for base edesc plus the link tables, 1063 * allocate space for base edesc plus the link tables,
1043 * allowing for a separate entry for the generated ICV (+ 1), 1064 * allowing for two separate entries for ICV and generated ICV (+ 2),
1044 * and the ICV data itself 1065 * and the ICV data itself
1045 */ 1066 */
1046 alloc_len = sizeof(struct ipsec_esp_edesc); 1067 alloc_len = sizeof(struct ipsec_esp_edesc);
1047 if (src_nents || dst_nents) { 1068 if (src_nents || dst_nents) {
1048 dma_len = (src_nents + dst_nents + 1) * 1069 dma_len = (src_nents + dst_nents + 2) *
1049 sizeof(struct talitos_ptr) + ctx->authsize; 1070 sizeof(struct talitos_ptr) + ctx->authsize;
1050 alloc_len += dma_len; 1071 alloc_len += dma_len;
1051 } else { 1072 } else {
@@ -1104,7 +1125,7 @@ static int aead_authenc_decrypt(struct aead_request *req)
1104 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1125 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1105 if (edesc->dma_len) 1126 if (edesc->dma_len)
1106 icvdata = &edesc->link_tbl[edesc->src_nents + 1127 icvdata = &edesc->link_tbl[edesc->src_nents +
1107 edesc->dst_nents + 1]; 1128 edesc->dst_nents + 2];
1108 else 1129 else
1109 icvdata = &edesc->link_tbl[0]; 1130 icvdata = &edesc->link_tbl[0];
1110 1131
@@ -1480,6 +1501,9 @@ static int talitos_probe(struct of_device *ofdev,
1480 goto err_out; 1501 goto err_out;
1481 } 1502 }
1482 1503
1504 if (of_device_is_compatible(np, "fsl,sec3.0"))
1505 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
1506
1483 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1507 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1484 GFP_KERNEL); 1508 GFP_KERNEL);
1485 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1509 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index a4e4494663bf..0328da020a10 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,7 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/memory.h> 27#include <linux/memory.h>
28#include <asm/plat-orion/mv_xor.h> 28#include <plat/mv_xor.h>
29#include "mv_xor.h" 29#include "mv_xor.h"
30 30
31static void mv_xor_issue_pending(struct dma_chan *chan); 31static void mv_xor_issue_pending(struct dma_chan *chan);
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 001622eb86f9..3bf8ee120d42 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -84,20 +84,23 @@ static struct kobj_type memmap_ktype = {
84 */ 84 */
85 85
86/* 86/*
87 * Firmware memory map entries 87 * Firmware memory map entries. No locking is needed because the
88 * firmware_map_add() and firmware_map_add_early() functions are called
89 * in firmware initialisation code in one single thread of execution.
88 */ 90 */
89static LIST_HEAD(map_entries); 91static LIST_HEAD(map_entries);
90 92
91/** 93/**
92 * Common implementation of firmware_map_add() and firmware_map_add_early() 94 * firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
93 * which expects a pre-allocated struct firmware_map_entry.
94 *
95 * @start: Start of the memory range. 95 * @start: Start of the memory range.
96 * @end: End of the memory range (inclusive). 96 * @end: End of the memory range (inclusive).
97 * @type: Type of the memory range. 97 * @type: Type of the memory range.
98 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised 98 * @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
99 * entry. 99 * entry.
100 */ 100 *
101 * Common implementation of firmware_map_add() and firmware_map_add_early()
102 * which expects a pre-allocated struct firmware_map_entry.
103 **/
101static int firmware_map_add_entry(resource_size_t start, resource_size_t end, 104static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
102 const char *type, 105 const char *type,
103 struct firmware_map_entry *entry) 106 struct firmware_map_entry *entry)
@@ -115,33 +118,52 @@ static int firmware_map_add_entry(resource_size_t start, resource_size_t end,
115 return 0; 118 return 0;
116} 119}
117 120
118/* 121/**
119 * See <linux/firmware-map.h> for documentation. 122 * firmware_map_add() - Adds a firmware mapping entry.
120 */ 123 * @start: Start of the memory range.
124 * @end: End of the memory range (inclusive).
125 * @type: Type of the memory range.
126 *
127 * This function uses kmalloc() for memory
128 * allocation. Use firmware_map_add_early() if you want to use the bootmem
129 * allocator.
130 *
131 * That function must be called before late_initcall.
132 *
133 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
134 **/
121int firmware_map_add(resource_size_t start, resource_size_t end, 135int firmware_map_add(resource_size_t start, resource_size_t end,
122 const char *type) 136 const char *type)
123{ 137{
124 struct firmware_map_entry *entry; 138 struct firmware_map_entry *entry;
125 139
126 entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC); 140 entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
127 WARN_ON(!entry);
128 if (!entry) 141 if (!entry)
129 return -ENOMEM; 142 return -ENOMEM;
130 143
131 return firmware_map_add_entry(start, end, type, entry); 144 return firmware_map_add_entry(start, end, type, entry);
132} 145}
133 146
134/* 147/**
135 * See <linux/firmware-map.h> for documentation. 148 * firmware_map_add_early() - Adds a firmware mapping entry.
136 */ 149 * @start: Start of the memory range.
150 * @end: End of the memory range (inclusive).
151 * @type: Type of the memory range.
152 *
153 * Adds a firmware mapping entry. This function uses the bootmem allocator
154 * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
155 *
156 * That function must be called before late_initcall.
157 *
158 * Returns 0 on success, or -ENOMEM if no memory could be allocated.
159 **/
137int __init firmware_map_add_early(resource_size_t start, resource_size_t end, 160int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
138 const char *type) 161 const char *type)
139{ 162{
140 struct firmware_map_entry *entry; 163 struct firmware_map_entry *entry;
141 164
142 entry = alloc_bootmem_low(sizeof(struct firmware_map_entry)); 165 entry = alloc_bootmem_low(sizeof(struct firmware_map_entry));
143 WARN_ON(!entry); 166 if (WARN_ON(!entry))
144 if (!entry)
145 return -ENOMEM; 167 return -ENOMEM;
146 168
147 return firmware_map_add_entry(start, end, type, entry); 169 return firmware_map_add_entry(start, end, type, entry);
@@ -183,7 +205,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj,
183/* 205/*
184 * Initialises stuff and adds the entries in the map_entries list to 206 * Initialises stuff and adds the entries in the map_entries list to
185 * sysfs. Important is that firmware_map_add() and firmware_map_add_early() 207 * sysfs. Important is that firmware_map_add() and firmware_map_add_early()
186 * must be called before late_initcall. 208 * must be called before late_initcall. That's just because that function
209 * is called as late_initcall() function, which means that if you call
210 * firmware_map_add() or firmware_map_add_early() afterwards, the entries
211 * are not added to sysfs.
187 */ 212 */
188static int __init memmap_init(void) 213static int __init memmap_init(void)
189{ 214{
@@ -192,13 +217,13 @@ static int __init memmap_init(void)
192 struct kset *memmap_kset; 217 struct kset *memmap_kset;
193 218
194 memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj); 219 memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
195 WARN_ON(!memmap_kset); 220 if (WARN_ON(!memmap_kset))
196 if (!memmap_kset)
197 return -ENOMEM; 221 return -ENOMEM;
198 222
199 list_for_each_entry(entry, &map_entries, list) { 223 list_for_each_entry(entry, &map_entries, list) {
200 entry->kobj.kset = memmap_kset; 224 entry->kobj.kset = memmap_kset;
201 kobject_add(&entry->kobj, NULL, "%d", i++); 225 if (kobject_add(&entry->kobj, NULL, "%d", i++))
226 kobject_put(&entry->kobj);
202 } 227 }
203 228
204 return 0; 229 return 0;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 61e78a4369b9..b15f88249639 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -654,12 +654,12 @@ static const struct hid_blacklist {
654 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 654 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
655 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 655 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD },
656 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN }, 656 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN },
657 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN }, 657 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
658 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 658 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE},
659 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN }, 659 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
660 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN }, 660 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE},
661 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD }, 661 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_APPLE_ISO_KEYBOARD | HID_QUIRK_IGNORE_MOUSE },
662 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN }, 662 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
663 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 663 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
664 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 664 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_APPLE_NUMLOCK_EMULATION | HID_QUIRK_APPLE_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
665 665
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bf4ebfb86fa5..d402e8d813ce 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -77,6 +77,22 @@ config SENSORS_AD7418
77 This driver can also be built as a module. If so, the module 77 This driver can also be built as a module. If so, the module
78 will be called ad7418. 78 will be called ad7418.
79 79
80config SENSORS_ADCXX
81 tristate "National Semiconductor ADCxxxSxxx"
82 depends on SPI_MASTER && EXPERIMENTAL
83 help
84 If you say yes here you get support for the National Semiconductor
85 ADC<bb><c>S<sss> chip family, where
86 * bb is the resolution in number of bits (8, 10, 12)
87 * c is the number of channels (1, 2, 4, 8)
88 * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500
89 kSPS and 101 for 1 MSPS)
90
91 Examples : ADC081S101, ADC124S501, ...
92
93 This driver can also be built as a module. If so, the module
94 will be called adcxx.
95
80config SENSORS_ADM1021 96config SENSORS_ADM1021
81 tristate "Analog Devices ADM1021 and compatibles" 97 tristate "Analog Devices ADM1021 and compatibles"
82 depends on I2C 98 depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 7943e5cefb06..950134ab8426 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_ABITUGURU) += abituguru.o
17obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o 17obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
18obj-$(CONFIG_SENSORS_AD7414) += ad7414.o 18obj-$(CONFIG_SENSORS_AD7414) += ad7414.o
19obj-$(CONFIG_SENSORS_AD7418) += ad7418.o 19obj-$(CONFIG_SENSORS_AD7418) += ad7418.o
20obj-$(CONFIG_SENSORS_ADCXX) += adcxx.o
20obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o 21obj-$(CONFIG_SENSORS_ADM1021) += adm1021.o
21obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o 22obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o
22obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o 23obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index f00f497b9ca9..d568c65c1370 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1,5 +1,8 @@
1/* 1/*
2 abituguru3.c Copyright (c) 2006 Hans de Goede <j.w.r.degoede@hhs.nl> 2 abituguru3.c
3
4 Copyright (c) 2006-2008 Hans de Goede <j.w.r.degoede@hhs.nl>
5 Copyright (c) 2008 Alistair John Strachan <alistair@devzero.co.uk>
3 6
4 This program is free software; you can redistribute it and/or modify 7 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by 8 it under the terms of the GNU General Public License as published by
@@ -116,7 +119,7 @@ struct abituguru3_sensor_info {
116 119
117struct abituguru3_motherboard_info { 120struct abituguru3_motherboard_info {
118 u16 id; 121 u16 id;
119 const char *name; 122 const char *dmi_name;
120 /* + 1 -> end of sensors indicated by a sensor with name == NULL */ 123 /* + 1 -> end of sensors indicated by a sensor with name == NULL */
121 struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1]; 124 struct abituguru3_sensor_info sensors[ABIT_UGURU3_MAX_NO_SENSORS + 1];
122}; 125};
@@ -161,7 +164,7 @@ struct abituguru3_data {
161 164
162/* Constants */ 165/* Constants */
163static const struct abituguru3_motherboard_info abituguru3_motherboards[] = { 166static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
164 { 0x000C, "unknown", { 167 { 0x000C, NULL /* Unknown, need DMI string */, {
165 { "CPU Core", 0, 0, 10, 1, 0 }, 168 { "CPU Core", 0, 0, 10, 1, 0 },
166 { "DDR", 1, 0, 10, 1, 0 }, 169 { "DDR", 1, 0, 10, 1, 0 },
167 { "DDR VTT", 2, 0, 10, 1, 0 }, 170 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -183,7 +186,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
183 { "AUX1 Fan", 35, 2, 60, 1, 0 }, 186 { "AUX1 Fan", 35, 2, 60, 1, 0 },
184 { NULL, 0, 0, 0, 0, 0 } } 187 { NULL, 0, 0, 0, 0, 0 } }
185 }, 188 },
186 { 0x000D, "Abit AW8", { 189 { 0x000D, NULL /* Abit AW8, need DMI string */, {
187 { "CPU Core", 0, 0, 10, 1, 0 }, 190 { "CPU Core", 0, 0, 10, 1, 0 },
188 { "DDR", 1, 0, 10, 1, 0 }, 191 { "DDR", 1, 0, 10, 1, 0 },
189 { "DDR VTT", 2, 0, 10, 1, 0 }, 192 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -212,7 +215,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
212 { "AUX5 Fan", 39, 2, 60, 1, 0 }, 215 { "AUX5 Fan", 39, 2, 60, 1, 0 },
213 { NULL, 0, 0, 0, 0, 0 } } 216 { NULL, 0, 0, 0, 0, 0 } }
214 }, 217 },
215 { 0x000E, "AL-8", { 218 { 0x000E, NULL /* AL-8, need DMI string */, {
216 { "CPU Core", 0, 0, 10, 1, 0 }, 219 { "CPU Core", 0, 0, 10, 1, 0 },
217 { "DDR", 1, 0, 10, 1, 0 }, 220 { "DDR", 1, 0, 10, 1, 0 },
218 { "DDR VTT", 2, 0, 10, 1, 0 }, 221 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -233,7 +236,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
233 { "SYS Fan", 34, 2, 60, 1, 0 }, 236 { "SYS Fan", 34, 2, 60, 1, 0 },
234 { NULL, 0, 0, 0, 0, 0 } } 237 { NULL, 0, 0, 0, 0, 0 } }
235 }, 238 },
236 { 0x000F, "unknown", { 239 { 0x000F, NULL /* Unknown, need DMI string */, {
237 { "CPU Core", 0, 0, 10, 1, 0 }, 240 { "CPU Core", 0, 0, 10, 1, 0 },
238 { "DDR", 1, 0, 10, 1, 0 }, 241 { "DDR", 1, 0, 10, 1, 0 },
239 { "DDR VTT", 2, 0, 10, 1, 0 }, 242 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -254,7 +257,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
254 { "SYS Fan", 34, 2, 60, 1, 0 }, 257 { "SYS Fan", 34, 2, 60, 1, 0 },
255 { NULL, 0, 0, 0, 0, 0 } } 258 { NULL, 0, 0, 0, 0, 0 } }
256 }, 259 },
257 { 0x0010, "Abit NI8 SLI GR", { 260 { 0x0010, NULL /* Abit NI8 SLI GR, need DMI string */, {
258 { "CPU Core", 0, 0, 10, 1, 0 }, 261 { "CPU Core", 0, 0, 10, 1, 0 },
259 { "DDR", 1, 0, 10, 1, 0 }, 262 { "DDR", 1, 0, 10, 1, 0 },
260 { "DDR VTT", 2, 0, 10, 1, 0 }, 263 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -276,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
276 { "OTES1 Fan", 36, 2, 60, 1, 0 }, 279 { "OTES1 Fan", 36, 2, 60, 1, 0 },
277 { NULL, 0, 0, 0, 0, 0 } } 280 { NULL, 0, 0, 0, 0, 0 } }
278 }, 281 },
279 { 0x0011, "Abit AT8 32X", { 282 { 0x0011, NULL /* Abit AT8 32X, need DMI string */, {
280 { "CPU Core", 0, 0, 10, 1, 0 }, 283 { "CPU Core", 0, 0, 10, 1, 0 },
281 { "DDR", 1, 0, 20, 1, 0 }, 284 { "DDR", 1, 0, 20, 1, 0 },
282 { "DDR VTT", 2, 0, 10, 1, 0 }, 285 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -302,7 +305,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
302 { "AUX2 Fan", 36, 2, 60, 1, 0 }, 305 { "AUX2 Fan", 36, 2, 60, 1, 0 },
303 { NULL, 0, 0, 0, 0, 0 } } 306 { NULL, 0, 0, 0, 0, 0 } }
304 }, 307 },
305 { 0x0012, "Abit AN8 32X", { 308 { 0x0012, NULL /* Abit AN8 32X, need DMI string */, {
306 { "CPU Core", 0, 0, 10, 1, 0 }, 309 { "CPU Core", 0, 0, 10, 1, 0 },
307 { "DDR", 1, 0, 20, 1, 0 }, 310 { "DDR", 1, 0, 20, 1, 0 },
308 { "DDR VTT", 2, 0, 10, 1, 0 }, 311 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -324,7 +327,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
324 { "AUX1 Fan", 36, 2, 60, 1, 0 }, 327 { "AUX1 Fan", 36, 2, 60, 1, 0 },
325 { NULL, 0, 0, 0, 0, 0 } } 328 { NULL, 0, 0, 0, 0, 0 } }
326 }, 329 },
327 { 0x0013, "Abit AW8D", { 330 { 0x0013, NULL /* Abit AW8D, need DMI string */, {
328 { "CPU Core", 0, 0, 10, 1, 0 }, 331 { "CPU Core", 0, 0, 10, 1, 0 },
329 { "DDR", 1, 0, 10, 1, 0 }, 332 { "DDR", 1, 0, 10, 1, 0 },
330 { "DDR VTT", 2, 0, 10, 1, 0 }, 333 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -353,7 +356,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
353 { "AUX5 Fan", 39, 2, 60, 1, 0 }, 356 { "AUX5 Fan", 39, 2, 60, 1, 0 },
354 { NULL, 0, 0, 0, 0, 0 } } 357 { NULL, 0, 0, 0, 0, 0 } }
355 }, 358 },
356 { 0x0014, "Abit AB9 Pro", { 359 { 0x0014, NULL /* Abit AB9 Pro, need DMI string */, {
357 { "CPU Core", 0, 0, 10, 1, 0 }, 360 { "CPU Core", 0, 0, 10, 1, 0 },
358 { "DDR", 1, 0, 10, 1, 0 }, 361 { "DDR", 1, 0, 10, 1, 0 },
359 { "DDR VTT", 2, 0, 10, 1, 0 }, 362 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -374,7 +377,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
374 { "SYS Fan", 34, 2, 60, 1, 0 }, 377 { "SYS Fan", 34, 2, 60, 1, 0 },
375 { NULL, 0, 0, 0, 0, 0 } } 378 { NULL, 0, 0, 0, 0, 0 } }
376 }, 379 },
377 { 0x0015, "unknown", { 380 { 0x0015, NULL /* Unknown, need DMI string */, {
378 { "CPU Core", 0, 0, 10, 1, 0 }, 381 { "CPU Core", 0, 0, 10, 1, 0 },
379 { "DDR", 1, 0, 20, 1, 0 }, 382 { "DDR", 1, 0, 20, 1, 0 },
380 { "DDR VTT", 2, 0, 10, 1, 0 }, 383 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -398,7 +401,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
398 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 401 { "AUX3 Fan", 36, 2, 60, 1, 0 },
399 { NULL, 0, 0, 0, 0, 0 } } 402 { NULL, 0, 0, 0, 0, 0 } }
400 }, 403 },
401 { 0x0016, "AW9D-MAX", { 404 { 0x0016, NULL /* AW9D-MAX, need DMI string */, {
402 { "CPU Core", 0, 0, 10, 1, 0 }, 405 { "CPU Core", 0, 0, 10, 1, 0 },
403 { "DDR2", 1, 0, 20, 1, 0 }, 406 { "DDR2", 1, 0, 20, 1, 0 },
404 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 407 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -426,7 +429,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
426 { "OTES1 Fan", 38, 2, 60, 1, 0 }, 429 { "OTES1 Fan", 38, 2, 60, 1, 0 },
427 { NULL, 0, 0, 0, 0, 0 } } 430 { NULL, 0, 0, 0, 0, 0 } }
428 }, 431 },
429 { 0x0017, "unknown", { 432 { 0x0017, NULL /* Unknown, need DMI string */, {
430 { "CPU Core", 0, 0, 10, 1, 0 }, 433 { "CPU Core", 0, 0, 10, 1, 0 },
431 { "DDR2", 1, 0, 20, 1, 0 }, 434 { "DDR2", 1, 0, 20, 1, 0 },
432 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 435 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -451,7 +454,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
451 { "AUX3 FAN", 37, 2, 60, 1, 0 }, 454 { "AUX3 FAN", 37, 2, 60, 1, 0 },
452 { NULL, 0, 0, 0, 0, 0 } } 455 { NULL, 0, 0, 0, 0, 0 } }
453 }, 456 },
454 { 0x0018, "unknown", { 457 { 0x0018, NULL /* Unknown, need DMI string */, {
455 { "CPU Core", 0, 0, 10, 1, 0 }, 458 { "CPU Core", 0, 0, 10, 1, 0 },
456 { "DDR2", 1, 0, 20, 1, 0 }, 459 { "DDR2", 1, 0, 20, 1, 0 },
457 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 460 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -478,7 +481,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
478 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 481 { "AUX3 Fan", 36, 2, 60, 1, 0 },
479 { NULL, 0, 0, 0, 0, 0 } } 482 { NULL, 0, 0, 0, 0, 0 } }
480 }, 483 },
481 { 0x0019, "unknown", { 484 { 0x0019, NULL /* Unknown, need DMI string */, {
482 { "CPU Core", 7, 0, 10, 1, 0 }, 485 { "CPU Core", 7, 0, 10, 1, 0 },
483 { "DDR2", 13, 0, 20, 1, 0 }, 486 { "DDR2", 13, 0, 20, 1, 0 },
484 { "DDR2 VTT", 14, 0, 10, 1, 0 }, 487 { "DDR2 VTT", 14, 0, 10, 1, 0 },
@@ -505,7 +508,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
505 { "AUX3 FAN", 36, 2, 60, 1, 0 }, 508 { "AUX3 FAN", 36, 2, 60, 1, 0 },
506 { NULL, 0, 0, 0, 0, 0 } } 509 { NULL, 0, 0, 0, 0, 0 } }
507 }, 510 },
508 { 0x001A, "Abit IP35 Pro", { 511 { 0x001A, "IP35 Pro(Intel P35-ICH9R)", {
509 { "CPU Core", 0, 0, 10, 1, 0 }, 512 { "CPU Core", 0, 0, 10, 1, 0 },
510 { "DDR2", 1, 0, 20, 1, 0 }, 513 { "DDR2", 1, 0, 20, 1, 0 },
511 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 514 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -533,7 +536,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
533 { "AUX4 Fan", 37, 2, 60, 1, 0 }, 536 { "AUX4 Fan", 37, 2, 60, 1, 0 },
534 { NULL, 0, 0, 0, 0, 0 } } 537 { NULL, 0, 0, 0, 0, 0 } }
535 }, 538 },
536 { 0x001B, "unknown", { 539 { 0x001B, NULL /* Unknown, need DMI string */, {
537 { "CPU Core", 0, 0, 10, 1, 0 }, 540 { "CPU Core", 0, 0, 10, 1, 0 },
538 { "DDR3", 1, 0, 20, 1, 0 }, 541 { "DDR3", 1, 0, 20, 1, 0 },
539 { "DDR3 VTT", 2, 0, 10, 1, 0 }, 542 { "DDR3 VTT", 2, 0, 10, 1, 0 },
@@ -560,7 +563,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
560 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 563 { "AUX3 Fan", 36, 2, 60, 1, 0 },
561 { NULL, 0, 0, 0, 0, 0 } } 564 { NULL, 0, 0, 0, 0, 0 } }
562 }, 565 },
563 { 0x001C, "unknown", { 566 { 0x001C, NULL /* Unknown, need DMI string */, {
564 { "CPU Core", 0, 0, 10, 1, 0 }, 567 { "CPU Core", 0, 0, 10, 1, 0 },
565 { "DDR2", 1, 0, 20, 1, 0 }, 568 { "DDR2", 1, 0, 20, 1, 0 },
566 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 569 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -935,9 +938,18 @@ static int __devinit abituguru3_probe(struct platform_device *pdev)
935 goto abituguru3_probe_error; 938 goto abituguru3_probe_error;
936 } 939 }
937 data->sensors = abituguru3_motherboards[i].sensors; 940 data->sensors = abituguru3_motherboards[i].sensors;
941
938 printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard " 942 printk(KERN_INFO ABIT_UGURU3_NAME ": found Abit uGuru3, motherboard "
939 "ID: %04X (%s)\n", (unsigned int)id, 943 "ID: %04X\n", (unsigned int)id);
940 abituguru3_motherboards[i].name); 944
945#ifdef CONFIG_DMI
946 if (!abituguru3_motherboards[i].dmi_name) {
947 printk(KERN_WARNING ABIT_UGURU3_NAME ": this motherboard was "
948 "not detected using DMI. Please send the output of "
949 "\"dmidecode\" to the abituguru3 maintainer"
950 "(see MAINTAINERS)\n");
951 }
952#endif
941 953
942 /* Fill the sysfs attr array */ 954 /* Fill the sysfs attr array */
943 sysfs_attr_i = 0; 955 sysfs_attr_i = 0;
@@ -1109,6 +1121,46 @@ static struct platform_driver abituguru3_driver = {
1109 .resume = abituguru3_resume 1121 .resume = abituguru3_resume
1110}; 1122};
1111 1123
1124#ifdef CONFIG_DMI
1125
1126static int __init abituguru3_dmi_detect(void)
1127{
1128 const char *board_vendor, *board_name;
1129 int i, err = (force) ? 1 : -ENODEV;
1130
1131 board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
1132 if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))
1133 return err;
1134
1135 board_name = dmi_get_system_info(DMI_BOARD_NAME);
1136 if (!board_name)
1137 return err;
1138
1139 for (i = 0; abituguru3_motherboards[i].id; i++) {
1140 const char *dmi_name = abituguru3_motherboards[i].dmi_name;
1141 if (dmi_name && !strcmp(dmi_name, board_name))
1142 break;
1143 }
1144
1145 if (!abituguru3_motherboards[i].id)
1146 return 1;
1147
1148 return 0;
1149}
1150
1151#else /* !CONFIG_DMI */
1152
1153static inline int abituguru3_dmi_detect(void)
1154{
1155 return -ENODEV;
1156}
1157
1158#endif /* CONFIG_DMI */
1159
1160/* FIXME: Manual detection should die eventually; we need to collect stable
1161 * DMI model names first before we can rely entirely on CONFIG_DMI.
1162 */
1163
1112static int __init abituguru3_detect(void) 1164static int __init abituguru3_detect(void)
1113{ 1165{
1114 /* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or 1166 /* See if there is an uguru3 there. An idle uGuru3 will hold 0x00 or
@@ -1119,7 +1171,7 @@ static int __init abituguru3_detect(void)
1119 if (((data_val == 0x00) || (data_val == 0x08)) && 1171 if (((data_val == 0x00) || (data_val == 0x08)) &&
1120 ((cmd_val == 0xAC) || (cmd_val == 0x05) || 1172 ((cmd_val == 0xAC) || (cmd_val == 0x05) ||
1121 (cmd_val == 0x55))) 1173 (cmd_val == 0x55)))
1122 return ABIT_UGURU3_BASE; 1174 return 0;
1123 1175
1124 ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = " 1176 ABIT_UGURU3_DEBUG("no Abit uGuru3 found, data = 0x%02X, cmd = "
1125 "0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val); 1177 "0x%02X\n", (unsigned int)data_val, (unsigned int)cmd_val);
@@ -1127,7 +1179,7 @@ static int __init abituguru3_detect(void)
1127 if (force) { 1179 if (force) {
1128 printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is " 1180 printk(KERN_INFO ABIT_UGURU3_NAME ": Assuming Abit uGuru3 is "
1129 "present because of \"force\" parameter\n"); 1181 "present because of \"force\" parameter\n");
1130 return ABIT_UGURU3_BASE; 1182 return 0;
1131 } 1183 }
1132 1184
1133 /* No uGuru3 found */ 1185 /* No uGuru3 found */
@@ -1138,27 +1190,29 @@ static struct platform_device *abituguru3_pdev;
1138 1190
1139static int __init abituguru3_init(void) 1191static int __init abituguru3_init(void)
1140{ 1192{
1141 int address, err;
1142 struct resource res = { .flags = IORESOURCE_IO }; 1193 struct resource res = { .flags = IORESOURCE_IO };
1143 1194 int err;
1144#ifdef CONFIG_DMI 1195
1145 const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); 1196 /* Attempt DMI detection first */
1146 1197 err = abituguru3_dmi_detect();
1147 /* safety check, refuse to load on non Abit motherboards */ 1198 if (err < 0)
1148 if (!force && (!board_vendor || 1199 return err;
1149 strcmp(board_vendor, "http://www.abit.com.tw/"))) 1200
1150 return -ENODEV; 1201 /* Fall back to manual detection if there was no exact
1151#endif 1202 * board name match, or force was specified.
1152 1203 */
1153 address = abituguru3_detect(); 1204 if (err > 0) {
1154 if (address < 0) 1205 err = abituguru3_detect();
1155 return address; 1206 if (err)
1207 return err;
1208 }
1156 1209
1157 err = platform_driver_register(&abituguru3_driver); 1210 err = platform_driver_register(&abituguru3_driver);
1158 if (err) 1211 if (err)
1159 goto exit; 1212 goto exit;
1160 1213
1161 abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME, address); 1214 abituguru3_pdev = platform_device_alloc(ABIT_UGURU3_NAME,
1215 ABIT_UGURU3_BASE);
1162 if (!abituguru3_pdev) { 1216 if (!abituguru3_pdev) {
1163 printk(KERN_ERR ABIT_UGURU3_NAME 1217 printk(KERN_ERR ABIT_UGURU3_NAME
1164 ": Device allocation failed\n"); 1218 ": Device allocation failed\n");
@@ -1166,8 +1220,8 @@ static int __init abituguru3_init(void)
1166 goto exit_driver_unregister; 1220 goto exit_driver_unregister;
1167 } 1221 }
1168 1222
1169 res.start = address; 1223 res.start = ABIT_UGURU3_BASE;
1170 res.end = address + ABIT_UGURU3_REGION_LENGTH - 1; 1224 res.end = ABIT_UGURU3_BASE + ABIT_UGURU3_REGION_LENGTH - 1;
1171 res.name = ABIT_UGURU3_NAME; 1225 res.name = ABIT_UGURU3_NAME;
1172 1226
1173 err = platform_device_add_resources(abituguru3_pdev, &res, 1); 1227 err = platform_device_add_resources(abituguru3_pdev, &res, 1);
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
new file mode 100644
index 000000000000..242294db3db6
--- /dev/null
+++ b/drivers/hwmon/adcxx.c
@@ -0,0 +1,329 @@
1/*
2 * adcxx.c
3 *
4 * The adcxx4s is an AD converter family from National Semiconductor (NS).
5 *
6 * Copyright (c) 2008 Marc Pignat <marc.pignat@hevs.ch>
7 *
8 * The adcxx4s communicates with a host processor via an SPI/Microwire Bus
9 * interface. This driver supports the whole family of devices with name
10 * ADC<bb><c>S<sss>, where
11 * * bb is the resolution in number of bits (8, 10, 12)
12 * * c is the number of channels (1, 2, 4, 8)
13 * * sss is the maximum conversion speed (021 for 200 kSPS, 051 for 500 kSPS
14 * and 101 for 1 MSPS)
15 *
16 * Complete datasheets are available at National's website here:
17 * http://www.national.com/ds/DC/ADC<bb><c>S<sss>.pdf
18 *
19 * Handling of 8, 10 and 12 bits converters are the same, the
20 * unavailable bits are 0 :)
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2 of the License, or
25 * (at your option) any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/device.h>
41#include <linux/err.h>
42#include <linux/sysfs.h>
43#include <linux/hwmon.h>
44#include <linux/hwmon-sysfs.h>
45#include <linux/mutex.h>
46#include <linux/spi/spi.h>
47
48#define DRVNAME "adcxx"
49
50struct adcxx {
51 struct device *hwmon_dev;
52 struct mutex lock;
53 u32 channels;
54 u32 reference; /* in millivolts */
55};
56
57/* sysfs hook function */
58static ssize_t adcxx_read(struct device *dev,
59 struct device_attribute *devattr, char *buf)
60{
61 struct spi_device *spi = to_spi_device(dev);
62 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
63 struct adcxx *adc = dev_get_drvdata(&spi->dev);
64 u8 tx_buf[2] = { attr->index << 3 }; /* other bits are don't care */
65 u8 rx_buf[2];
66 int status;
67 int value;
68
69 if (mutex_lock_interruptible(&adc->lock))
70 return -ERESTARTSYS;
71
72 status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
73 rx_buf, sizeof(rx_buf));
74 if (status < 0) {
75 dev_warn(dev, "spi_write_then_read failed with status %d\n",
76 status);
77 goto out;
78 }
79
80 value = (rx_buf[0] << 8) + rx_buf[1];
81 dev_dbg(dev, "raw value = 0x%x\n", value);
82
83 value = value * adc->reference >> 12;
84 status = sprintf(buf, "%d\n", value);
85out:
86 mutex_unlock(&adc->lock);
87 return status;
88}
89
90static ssize_t adcxx_show_min(struct device *dev,
91 struct device_attribute *devattr, char *buf)
92{
93 /* The minimum reference is 0 for this chip family */
94 return sprintf(buf, "0\n");
95}
96
97static ssize_t adcxx_show_max(struct device *dev,
98 struct device_attribute *devattr, char *buf)
99{
100 struct spi_device *spi = to_spi_device(dev);
101 struct adcxx *adc = dev_get_drvdata(&spi->dev);
102 u32 reference;
103
104 if (mutex_lock_interruptible(&adc->lock))
105 return -ERESTARTSYS;
106
107 reference = adc->reference;
108
109 mutex_unlock(&adc->lock);
110
111 return sprintf(buf, "%d\n", reference);
112}
113
114static ssize_t adcxx_set_max(struct device *dev,
115 struct device_attribute *devattr, const char *buf, size_t count)
116{
117 struct spi_device *spi = to_spi_device(dev);
118 struct adcxx *adc = dev_get_drvdata(&spi->dev);
119 unsigned long value;
120
121 if (strict_strtoul(buf, 10, &value))
122 return -EINVAL;
123
124 if (mutex_lock_interruptible(&adc->lock))
125 return -ERESTARTSYS;
126
127 adc->reference = value;
128
129 mutex_unlock(&adc->lock);
130
131 return count;
132}
133
134static ssize_t adcxx_show_name(struct device *dev, struct device_attribute
135 *devattr, char *buf)
136{
137 struct spi_device *spi = to_spi_device(dev);
138 struct adcxx *adc = dev_get_drvdata(&spi->dev);
139
140 return sprintf(buf, "adcxx%ds\n", adc->channels);
141}
142
143static struct sensor_device_attribute ad_input[] = {
144 SENSOR_ATTR(name, S_IRUGO, adcxx_show_name, NULL, 0),
145 SENSOR_ATTR(in_min, S_IRUGO, adcxx_show_min, NULL, 0),
146 SENSOR_ATTR(in_max, S_IWUSR | S_IRUGO, adcxx_show_max,
147 adcxx_set_max, 0),
148 SENSOR_ATTR(in0_input, S_IRUGO, adcxx_read, NULL, 0),
149 SENSOR_ATTR(in1_input, S_IRUGO, adcxx_read, NULL, 1),
150 SENSOR_ATTR(in2_input, S_IRUGO, adcxx_read, NULL, 2),
151 SENSOR_ATTR(in3_input, S_IRUGO, adcxx_read, NULL, 3),
152 SENSOR_ATTR(in4_input, S_IRUGO, adcxx_read, NULL, 4),
153 SENSOR_ATTR(in5_input, S_IRUGO, adcxx_read, NULL, 5),
154 SENSOR_ATTR(in6_input, S_IRUGO, adcxx_read, NULL, 6),
155 SENSOR_ATTR(in7_input, S_IRUGO, adcxx_read, NULL, 7),
156};
157
158/*----------------------------------------------------------------------*/
159
160static int __devinit adcxx_probe(struct spi_device *spi, int channels)
161{
162 struct adcxx *adc;
163 int status;
164 int i;
165
166 adc = kzalloc(sizeof *adc, GFP_KERNEL);
167 if (!adc)
168 return -ENOMEM;
169
170 /* set a default value for the reference */
171 adc->reference = 3300;
172 adc->channels = channels;
173 mutex_init(&adc->lock);
174
175 mutex_lock(&adc->lock);
176
177 dev_set_drvdata(&spi->dev, adc);
178
179 for (i = 0; i < 3 + adc->channels; i++) {
180 status = device_create_file(&spi->dev, &ad_input[i].dev_attr);
181 if (status) {
182 dev_err(&spi->dev, "device_create_file failed.\n");
183 goto out_err;
184 }
185 }
186
187 adc->hwmon_dev = hwmon_device_register(&spi->dev);
188 if (IS_ERR(adc->hwmon_dev)) {
189 dev_err(&spi->dev, "hwmon_device_register failed.\n");
190 status = PTR_ERR(adc->hwmon_dev);
191 goto out_err;
192 }
193
194 mutex_unlock(&adc->lock);
195 return 0;
196
197out_err:
198 for (i--; i >= 0; i--)
199 device_remove_file(&spi->dev, &ad_input[i].dev_attr);
200
201 dev_set_drvdata(&spi->dev, NULL);
202 mutex_unlock(&adc->lock);
203 kfree(adc);
204 return status;
205}
206
207static int __devinit adcxx1s_probe(struct spi_device *spi)
208{
209 return adcxx_probe(spi, 1);
210}
211
212static int __devinit adcxx2s_probe(struct spi_device *spi)
213{
214 return adcxx_probe(spi, 2);
215}
216
217static int __devinit adcxx4s_probe(struct spi_device *spi)
218{
219 return adcxx_probe(spi, 4);
220}
221
222static int __devinit adcxx8s_probe(struct spi_device *spi)
223{
224 return adcxx_probe(spi, 8);
225}
226
227static int __devexit adcxx_remove(struct spi_device *spi)
228{
229 struct adcxx *adc = dev_get_drvdata(&spi->dev);
230 int i;
231
232 mutex_lock(&adc->lock);
233 hwmon_device_unregister(adc->hwmon_dev);
234 for (i = 0; i < 3 + adc->channels; i++)
235 device_remove_file(&spi->dev, &ad_input[i].dev_attr);
236
237 dev_set_drvdata(&spi->dev, NULL);
238 mutex_unlock(&adc->lock);
239 kfree(adc);
240
241 return 0;
242}
243
244static struct spi_driver adcxx1s_driver = {
245 .driver = {
246 .name = "adcxx1s",
247 .owner = THIS_MODULE,
248 },
249 .probe = adcxx1s_probe,
250 .remove = __devexit_p(adcxx_remove),
251};
252
253static struct spi_driver adcxx2s_driver = {
254 .driver = {
255 .name = "adcxx2s",
256 .owner = THIS_MODULE,
257 },
258 .probe = adcxx2s_probe,
259 .remove = __devexit_p(adcxx_remove),
260};
261
262static struct spi_driver adcxx4s_driver = {
263 .driver = {
264 .name = "adcxx4s",
265 .owner = THIS_MODULE,
266 },
267 .probe = adcxx4s_probe,
268 .remove = __devexit_p(adcxx_remove),
269};
270
271static struct spi_driver adcxx8s_driver = {
272 .driver = {
273 .name = "adcxx8s",
274 .owner = THIS_MODULE,
275 },
276 .probe = adcxx8s_probe,
277 .remove = __devexit_p(adcxx_remove),
278};
279
280static int __init init_adcxx(void)
281{
282 int status;
283 status = spi_register_driver(&adcxx1s_driver);
284 if (status)
285 goto reg_1_failed;
286
287 status = spi_register_driver(&adcxx2s_driver);
288 if (status)
289 goto reg_2_failed;
290
291 status = spi_register_driver(&adcxx4s_driver);
292 if (status)
293 goto reg_4_failed;
294
295 status = spi_register_driver(&adcxx8s_driver);
296 if (status)
297 goto reg_8_failed;
298
299 return status;
300
301reg_8_failed:
302 spi_unregister_driver(&adcxx4s_driver);
303reg_4_failed:
304 spi_unregister_driver(&adcxx2s_driver);
305reg_2_failed:
306 spi_unregister_driver(&adcxx1s_driver);
307reg_1_failed:
308 return status;
309}
310
311static void __exit exit_adcxx(void)
312{
313 spi_unregister_driver(&adcxx1s_driver);
314 spi_unregister_driver(&adcxx2s_driver);
315 spi_unregister_driver(&adcxx4s_driver);
316 spi_unregister_driver(&adcxx8s_driver);
317}
318
319module_init(init_adcxx);
320module_exit(exit_adcxx);
321
322MODULE_AUTHOR("Marc Pignat");
323MODULE_DESCRIPTION("National Semiconductor adcxx8sxxx Linux driver");
324MODULE_LICENSE("GPL");
325
326MODULE_ALIAS("adcxx1s");
327MODULE_ALIAS("adcxx2s");
328MODULE_ALIAS("adcxx4s");
329MODULE_ALIAS("adcxx8s");
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index aacc0c4b809c..b06b8e090a27 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -98,6 +98,12 @@ static const char* temperature_sensors_sets[][36] = {
98 "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S", 98 "TH1P", "TH2P", "TH3P", "TMAP", "TMAS", "TMBS", "TM0P", "TM0S",
99 "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P", 99 "TM1P", "TM1S", "TM2P", "TM2S", "TM3S", "TM8P", "TM8S", "TM9P",
100 "TM9S", "TN0H", "TS0C", NULL }, 100 "TM9S", "TN0H", "TS0C", NULL },
101/* Set 5: iMac */
102 { "TC0D", "TA0P", "TG0P", "TG0D", "TG0H", "TH0P", "Tm0P", "TO0P",
103 "Tp0C", NULL },
104/* Set 6: Macbook3 set */
105 { "TB0T", "TC0D", "TC0P", "TM0P", "TN0P", "TTF0", "TW0P", "Th0H",
106 "Th0S", "Th1H", NULL },
101}; 107};
102 108
103/* List of keys used to read/write fan speeds */ 109/* List of keys used to read/write fan speeds */
@@ -1223,6 +1229,10 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
1223 { .accelerometer = 0, .light = 0, .temperature_set = 3 }, 1229 { .accelerometer = 0, .light = 0, .temperature_set = 3 },
1224/* MacPro: temperature set 4 */ 1230/* MacPro: temperature set 4 */
1225 { .accelerometer = 0, .light = 0, .temperature_set = 4 }, 1231 { .accelerometer = 0, .light = 0, .temperature_set = 4 },
1232/* iMac: temperature set 5 */
1233 { .accelerometer = 0, .light = 0, .temperature_set = 5 },
1234/* MacBook3: accelerometer and temperature set 6 */
1235 { .accelerometer = 1, .light = 0, .temperature_set = 6 },
1226}; 1236};
1227 1237
1228/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". 1238/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@@ -1232,10 +1242,14 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1232 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1242 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1233 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") }, 1243 DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro") },
1234 (void*)&applesmc_dmi_data[0]}, 1244 (void*)&applesmc_dmi_data[0]},
1235 { applesmc_dmi_match, "Apple MacBook", { 1245 { applesmc_dmi_match, "Apple MacBook (v2)", {
1236 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1246 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1237 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") }, 1247 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook2") },
1238 (void*)&applesmc_dmi_data[1]}, 1248 (void*)&applesmc_dmi_data[1]},
1249 { applesmc_dmi_match, "Apple MacBook (v3)", {
1250 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1251 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook3") },
1252 (void*)&applesmc_dmi_data[6]},
1239 { applesmc_dmi_match, "Apple MacBook", { 1253 { applesmc_dmi_match, "Apple MacBook", {
1240 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1254 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1241 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") }, 1255 DMI_MATCH(DMI_PRODUCT_NAME,"MacBook") },
@@ -1248,6 +1262,10 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
1248 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), 1262 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1249 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, 1263 DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
1250 (void*)&applesmc_dmi_data[4]}, 1264 (void*)&applesmc_dmi_data[4]},
1265 { applesmc_dmi_match, "Apple iMac", {
1266 DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
1267 DMI_MATCH(DMI_PRODUCT_NAME,"iMac") },
1268 (void*)&applesmc_dmi_data[5]},
1251 { .ident = NULL } 1269 { .ident = NULL }
1252}; 1270};
1253 1271
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 70239acecc8e..93c17223b527 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -413,10 +413,11 @@ static int __init coretemp_init(void)
413 for_each_online_cpu(i) { 413 for_each_online_cpu(i) {
414 struct cpuinfo_x86 *c = &cpu_data(i); 414 struct cpuinfo_x86 *c = &cpu_data(i);
415 415
416 /* check if family 6, models 0xe, 0xf, 0x16, 0x17 */ 416 /* check if family 6, models 0xe, 0xf, 0x16, 0x17, 0x1A */
417 if ((c->cpuid_level < 0) || (c->x86 != 0x6) || 417 if ((c->cpuid_level < 0) || (c->x86 != 0x6) ||
418 !((c->x86_model == 0xe) || (c->x86_model == 0xf) || 418 !((c->x86_model == 0xe) || (c->x86_model == 0xf) ||
419 (c->x86_model == 0x16) || (c->x86_model == 0x17))) { 419 (c->x86_model == 0x16) || (c->x86_model == 0x17) ||
420 (c->x86_model == 0x1A))) {
420 421
421 /* supported CPU not found, but report the unknown 422 /* supported CPU not found, but report the unknown
422 family 6 CPU */ 423 family 6 CPU */
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 7b0a32c4dcfb..c54eff92be4a 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -37,13 +37,21 @@
37 * For VRD 10.0 and up, "VRD x.y Design Guide", 37 * For VRD 10.0 and up, "VRD x.y Design Guide",
38 * available at http://developer.intel.com/. 38 * available at http://developer.intel.com/.
39 * 39 *
40 * AMD NPT 0Fh (Athlon64 & Opteron), AMD Publication 32559, 40 * AMD Athlon 64 and AMD Opteron Processors, AMD Publication 26094,
41 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/26094.PDF
42 * Table 74. VID Code Voltages
43 * This corresponds to an arbitrary VRM code of 24 in the functions below.
44 * These CPU models (K8 revision <= E) have 5 VID pins. See also:
45 * Revision Guide for AMD Athlon 64 and AMD Opteron Processors, AMD Publication 25759,
46 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/25759.pdf
47 *
48 * AMD NPT Family 0Fh Processors, AMD Publication 32559,
41 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf 49 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/32559.pdf
42 * Table 71. VID Code Voltages 50 * Table 71. VID Code Voltages
43 * AMD Opteron processors don't follow the Intel specifications. 51 * This corresponds to an arbitrary VRM code of 25 in the functions below.
44 * I'm going to "make up" 2.4 as the spec number for the Opterons. 52 * These CPU models (K8 revision >= F) have 6 VID pins. See also:
45 * No good reason just a mnemonic for the 24x Opteron processor 53 * Revision Guide for AMD NPT Family 0Fh Processors, AMD Publication 33610,
46 * series. 54 * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
47 * 55 *
48 * The 17 specification is in fact Intel Mobile Voltage Positioning - 56 * The 17 specification is in fact Intel Mobile Voltage Positioning -
49 * (IMVP-II). You can find more information in the datasheet of Max1718 57 * (IMVP-II). You can find more information in the datasheet of Max1718
@@ -95,7 +103,12 @@ int vid_from_reg(int val, u8 vrm)
95 return 0; 103 return 0;
96 return((1600000 - (val - 2) * 6250 + 500) / 1000); 104 return((1600000 - (val - 2) * 6250 + 500) / 1000);
97 105
98 case 24: /* AMD NPT 0Fh (Athlon64 & Opteron) */ 106 case 24: /* Athlon64 & Opteron */
107 val &= 0x1f;
108 if (val == 0x1f)
109 return 0;
110 /* fall through */
111 case 25: /* AMD NPT 0Fh */
99 val &= 0x3f; 112 val &= 0x3f;
100 return (val < 32) ? 1550 - 25 * val 113 return (val < 32) ? 1550 - 25 * val
101 : 775 - (25 * (val - 31)) / 2; 114 : 775 - (25 * (val - 31)) / 2;
@@ -157,11 +170,16 @@ struct vrm_model {
157 170
158#ifdef CONFIG_X86 171#ifdef CONFIG_X86
159 172
160/* the stepping parameter is highest acceptable stepping for current line */ 173/*
174 * The stepping parameter is highest acceptable stepping for current line.
175 * The model match must be exact for 4-bit values. For model values 0x10
176 * and above (extended model), all models below the parameter will match.
177 */
161 178
162static struct vrm_model vrm_models[] = { 179static struct vrm_model vrm_models[] = {
163 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */ 180 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
164 {X86_VENDOR_AMD, 0xF, ANY, ANY, 24}, /* Athlon 64, Opteron and above VRM 24 */ 181 {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
182 {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
165 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */ 183 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
166 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */ 184 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
167 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */ 185 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
@@ -189,6 +207,8 @@ static u8 find_vrm(u8 eff_family, u8 eff_model, u8 eff_stepping, u8 vendor)
189 if (vrm_models[i].vendor==vendor) 207 if (vrm_models[i].vendor==vendor)
190 if ((vrm_models[i].eff_family==eff_family) 208 if ((vrm_models[i].eff_family==eff_family)
191 && ((vrm_models[i].eff_model==eff_model) || 209 && ((vrm_models[i].eff_model==eff_model) ||
210 (vrm_models[i].eff_model >= 0x10 &&
211 eff_model <= vrm_models[i].eff_model) ||
192 (vrm_models[i].eff_model==ANY)) && 212 (vrm_models[i].eff_model==ANY)) &&
193 (eff_stepping <= vrm_models[i].eff_stepping)) 213 (eff_stepping <= vrm_models[i].eff_stepping))
194 return vrm_models[i].vrm_type; 214 return vrm_models[i].vrm_type;
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index f9e2ed621f7b..2ede9388096b 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -81,6 +81,8 @@ static unsigned long amb_reg_temp(unsigned int amb)
81#define MAX_AMBS_PER_CHANNEL 16 81#define MAX_AMBS_PER_CHANNEL 16
82#define MAX_AMBS (MAX_MEM_CHANNELS * \ 82#define MAX_AMBS (MAX_MEM_CHANNELS * \
83 MAX_AMBS_PER_CHANNEL) 83 MAX_AMBS_PER_CHANNEL)
84#define CHANNEL_SHIFT 4
85#define DIMM_MASK 0xF
84/* 86/*
85 * Ugly hack: For some reason the highest bit is set if there 87 * Ugly hack: For some reason the highest bit is set if there
86 * are _any_ DIMMs in the channel. Attempting to read from 88 * are _any_ DIMMs in the channel. Attempting to read from
@@ -89,7 +91,7 @@ static unsigned long amb_reg_temp(unsigned int amb)
89 * might prevent us from seeing the 16th DIMM in the channel. 91 * might prevent us from seeing the 16th DIMM in the channel.
90 */ 92 */
91#define REAL_MAX_AMBS_PER_CHANNEL 15 93#define REAL_MAX_AMBS_PER_CHANNEL 15
92#define KNOBS_PER_AMB 5 94#define KNOBS_PER_AMB 6
93 95
94static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit) 96static unsigned long amb_num_from_reg(unsigned int byte_num, unsigned int bit)
95{ 97{
@@ -238,6 +240,16 @@ static ssize_t show_amb_temp(struct device *dev,
238 500 * amb_read_byte(data, amb_reg_temp(attr->index))); 240 500 * amb_read_byte(data, amb_reg_temp(attr->index)));
239} 241}
240 242
243static ssize_t show_label(struct device *dev,
244 struct device_attribute *devattr,
245 char *buf)
246{
247 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
248
249 return sprintf(buf, "Ch. %d DIMM %d\n", attr->index >> CHANNEL_SHIFT,
250 attr->index & DIMM_MASK);
251}
252
241static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev) 253static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
242{ 254{
243 int i, j, k, d = 0; 255 int i, j, k, d = 0;
@@ -268,6 +280,20 @@ static int __devinit i5k_amb_hwmon_init(struct platform_device *pdev)
268 continue; 280 continue;
269 d++; 281 d++;
270 282
283 /* sysfs label */
284 iattr = data->attrs + data->num_attrs;
285 snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
286 "temp%d_label", d);
287 iattr->s_attr.dev_attr.attr.name = iattr->name;
288 iattr->s_attr.dev_attr.attr.mode = S_IRUGO;
289 iattr->s_attr.dev_attr.show = show_label;
290 iattr->s_attr.index = k;
291 res = device_create_file(&pdev->dev,
292 &iattr->s_attr.dev_attr);
293 if (res)
294 goto exit_remove;
295 data->num_attrs++;
296
271 /* Temperature sysfs knob */ 297 /* Temperature sysfs knob */
272 iattr = data->attrs + data->num_attrs; 298 iattr = data->attrs + data->num_attrs;
273 snprintf(iattr->name, AMB_SYSFS_NAME_LEN, 299 snprintf(iattr->name, AMB_SYSFS_NAME_LEN,
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index c9416e657487..0f70dc204105 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * A hwmon driver for the IBM Active Energy Manager temperature/power sensors 2 * A hwmon driver for the IBM System Director Active Energy Manager (AEM)
3 * and capping functionality. 3 * temperature/power/energy sensors and capping functionality.
4 * Copyright (C) 2008 IBM 4 * Copyright (C) 2008 IBM
5 * 5 *
6 * Author: Darrick J. Wong <djwong@us.ibm.com> 6 * Author: Darrick J. Wong <djwong@us.ibm.com>
@@ -463,12 +463,18 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg,
463} 463}
464 464
465/* Update AEM energy registers */ 465/* Update AEM energy registers */
466static void update_aem_energy_one(struct aem_data *data, int which)
467{
468 aem_read_sensor(data, AEM_ENERGY_ELEMENT, which,
469 &data->energy[which], 8);
470}
471
466static void update_aem_energy(struct aem_data *data) 472static void update_aem_energy(struct aem_data *data)
467{ 473{
468 aem_read_sensor(data, AEM_ENERGY_ELEMENT, 0, &data->energy[0], 8); 474 update_aem_energy_one(data, 0);
469 if (data->ver_major < 2) 475 if (data->ver_major < 2)
470 return; 476 return;
471 aem_read_sensor(data, AEM_ENERGY_ELEMENT, 1, &data->energy[1], 8); 477 update_aem_energy_one(data, 1);
472} 478}
473 479
474/* Update all AEM1 sensors */ 480/* Update all AEM1 sensors */
@@ -676,7 +682,8 @@ static int aem_find_aem2(struct aem_ipmi_data *data,
676 return -ETIMEDOUT; 682 return -ETIMEDOUT;
677 683
678 if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) || 684 if (data->rx_result || data->rx_msg_len != sizeof(*fi_resp) ||
679 memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id))) 685 memcmp(&fi_resp->id, &system_x_id, sizeof(system_x_id)) ||
686 fi_resp->num_instances <= instance_num)
680 return -ENOENT; 687 return -ENOENT;
681 688
682 return 0; 689 return 0;
@@ -849,7 +856,7 @@ static ssize_t aem_show_power(struct device *dev,
849 struct timespec b, a; 856 struct timespec b, a;
850 857
851 mutex_lock(&data->lock); 858 mutex_lock(&data->lock);
852 update_aem_energy(data); 859 update_aem_energy_one(data, attr->index);
853 getnstimeofday(&b); 860 getnstimeofday(&b);
854 before = data->energy[attr->index]; 861 before = data->energy[attr->index];
855 862
@@ -861,7 +868,7 @@ static ssize_t aem_show_power(struct device *dev,
861 return 0; 868 return 0;
862 } 869 }
863 870
864 update_aem_energy(data); 871 update_aem_energy_one(data, attr->index);
865 getnstimeofday(&a); 872 getnstimeofday(&a);
866 after = data->energy[attr->index]; 873 after = data->energy[attr->index];
867 mutex_unlock(&data->lock); 874 mutex_unlock(&data->lock);
@@ -880,7 +887,9 @@ static ssize_t aem_show_energy(struct device *dev,
880{ 887{
881 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); 888 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
882 struct aem_data *a = dev_get_drvdata(dev); 889 struct aem_data *a = dev_get_drvdata(dev);
883 a->update(a); 890 mutex_lock(&a->lock);
891 update_aem_energy_one(a, attr->index);
892 mutex_unlock(&a->lock);
884 893
885 return sprintf(buf, "%llu\n", 894 return sprintf(buf, "%llu\n",
886 (unsigned long long)a->energy[attr->index] * 1000); 895 (unsigned long long)a->energy[attr->index] * 1000);
@@ -1104,7 +1113,7 @@ static void __exit aem_exit(void)
1104} 1113}
1105 1114
1106MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>"); 1115MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
1107MODULE_DESCRIPTION("IBM Active Energy Manager power/temp sensor driver"); 1116MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
1108MODULE_LICENSE("GPL"); 1117MODULE_LICENSE("GPL");
1109 1118
1110module_init(aem_init); 1119module_init(aem_init);
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 7880c273c2c5..8f9595f2fb53 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -54,11 +54,11 @@ enum lm75_type { /* keep sorted in alphabetical order */
54 tmp75, 54 tmp75,
55}; 55};
56 56
57/* Addresses scanned by legacy style driver binding */ 57/* Addresses scanned */
58static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c, 58static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
59 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; 59 0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
60 60
61/* Insmod parameters (only for legacy style driver binding) */ 61/* Insmod parameters */
62I2C_CLIENT_INSMOD_1(lm75); 62I2C_CLIENT_INSMOD_1(lm75);
63 63
64 64
@@ -72,7 +72,6 @@ static const u8 LM75_REG_TEMP[3] = {
72 72
73/* Each client has this additional data */ 73/* Each client has this additional data */
74struct lm75_data { 74struct lm75_data {
75 struct i2c_client *client;
76 struct device *hwmon_dev; 75 struct device *hwmon_dev;
77 struct mutex update_lock; 76 struct mutex update_lock;
78 u8 orig_conf; 77 u8 orig_conf;
@@ -138,7 +137,7 @@ static const struct attribute_group lm75_group = {
138 137
139/*-----------------------------------------------------------------------*/ 138/*-----------------------------------------------------------------------*/
140 139
141/* "New style" I2C driver binding -- following the driver model */ 140/* device probe and removal */
142 141
143static int 142static int
144lm75_probe(struct i2c_client *client, const struct i2c_device_id *id) 143lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
@@ -157,8 +156,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
157 return -ENOMEM; 156 return -ENOMEM;
158 157
159 i2c_set_clientdata(client, data); 158 i2c_set_clientdata(client, data);
160
161 data->client = client;
162 mutex_init(&data->update_lock); 159 mutex_init(&data->update_lock);
163 160
164 /* Set to LM75 resolution (9 bits, 1/2 degree C) and range. 161 /* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
@@ -236,45 +233,16 @@ static const struct i2c_device_id lm75_ids[] = {
236}; 233};
237MODULE_DEVICE_TABLE(i2c, lm75_ids); 234MODULE_DEVICE_TABLE(i2c, lm75_ids);
238 235
239static struct i2c_driver lm75_driver = { 236/* Return 0 if detection is successful, -ENODEV otherwise */
240 .driver = { 237static int lm75_detect(struct i2c_client *new_client, int kind,
241 .name = "lm75", 238 struct i2c_board_info *info)
242 },
243 .probe = lm75_probe,
244 .remove = lm75_remove,
245 .id_table = lm75_ids,
246};
247
248/*-----------------------------------------------------------------------*/
249
250/* "Legacy" I2C driver binding */
251
252static struct i2c_driver lm75_legacy_driver;
253
254/* This function is called by i2c_probe */
255static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
256{ 239{
240 struct i2c_adapter *adapter = new_client->adapter;
257 int i; 241 int i;
258 struct i2c_client *new_client;
259 int err = 0;
260 242
261 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | 243 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
262 I2C_FUNC_SMBUS_WORD_DATA)) 244 I2C_FUNC_SMBUS_WORD_DATA))
263 goto exit; 245 return -ENODEV;
264
265 /* OK. For now, we presume we have a valid address. We create the
266 client structure, even though there may be no sensor present.
267 But it allows us to use i2c_smbus_read_*_data() calls. */
268 new_client = kzalloc(sizeof *new_client, GFP_KERNEL);
269 if (!new_client) {
270 err = -ENOMEM;
271 goto exit;
272 }
273
274 new_client->addr = address;
275 new_client->adapter = adapter;
276 new_client->driver = &lm75_legacy_driver;
277 new_client->flags = 0;
278 246
279 /* Now, we do the remaining detection. There is no identification- 247 /* Now, we do the remaining detection. There is no identification-
280 dedicated register so we have to rely on several tricks: 248 dedicated register so we have to rely on several tricks:
@@ -294,71 +262,44 @@ static int lm75_detect(struct i2c_adapter *adapter, int address, int kind)
294 || i2c_smbus_read_word_data(new_client, 5) != hyst 262 || i2c_smbus_read_word_data(new_client, 5) != hyst
295 || i2c_smbus_read_word_data(new_client, 6) != hyst 263 || i2c_smbus_read_word_data(new_client, 6) != hyst
296 || i2c_smbus_read_word_data(new_client, 7) != hyst) 264 || i2c_smbus_read_word_data(new_client, 7) != hyst)
297 goto exit_free; 265 return -ENODEV;
298 os = i2c_smbus_read_word_data(new_client, 3); 266 os = i2c_smbus_read_word_data(new_client, 3);
299 if (i2c_smbus_read_word_data(new_client, 4) != os 267 if (i2c_smbus_read_word_data(new_client, 4) != os
300 || i2c_smbus_read_word_data(new_client, 5) != os 268 || i2c_smbus_read_word_data(new_client, 5) != os
301 || i2c_smbus_read_word_data(new_client, 6) != os 269 || i2c_smbus_read_word_data(new_client, 6) != os
302 || i2c_smbus_read_word_data(new_client, 7) != os) 270 || i2c_smbus_read_word_data(new_client, 7) != os)
303 goto exit_free; 271 return -ENODEV;
304 272
305 /* Unused bits */ 273 /* Unused bits */
306 if (conf & 0xe0) 274 if (conf & 0xe0)
307 goto exit_free; 275 return -ENODEV;
308 276
309 /* Addresses cycling */ 277 /* Addresses cycling */
310 for (i = 8; i < 0xff; i += 8) 278 for (i = 8; i < 0xff; i += 8)
311 if (i2c_smbus_read_byte_data(new_client, i + 1) != conf 279 if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
312 || i2c_smbus_read_word_data(new_client, i + 2) != hyst 280 || i2c_smbus_read_word_data(new_client, i + 2) != hyst
313 || i2c_smbus_read_word_data(new_client, i + 3) != os) 281 || i2c_smbus_read_word_data(new_client, i + 3) != os)
314 goto exit_free; 282 return -ENODEV;
315 } 283 }
316 284
317 /* NOTE: we treat "force=..." and "force_lm75=..." the same. 285 /* NOTE: we treat "force=..." and "force_lm75=..." the same.
318 * Only new-style driver binding distinguishes chip types. 286 * Only new-style driver binding distinguishes chip types.
319 */ 287 */
320 strlcpy(new_client->name, "lm75", I2C_NAME_SIZE); 288 strlcpy(info->type, "lm75", I2C_NAME_SIZE);
321
322 /* Tell the I2C layer a new client has arrived */
323 err = i2c_attach_client(new_client);
324 if (err)
325 goto exit_free;
326
327 err = lm75_probe(new_client, NULL);
328 if (err < 0)
329 goto exit_detach;
330 289
331 return 0; 290 return 0;
332
333exit_detach:
334 i2c_detach_client(new_client);
335exit_free:
336 kfree(new_client);
337exit:
338 return err;
339}
340
341static int lm75_attach_adapter(struct i2c_adapter *adapter)
342{
343 if (!(adapter->class & I2C_CLASS_HWMON))
344 return 0;
345 return i2c_probe(adapter, &addr_data, lm75_detect);
346} 291}
347 292
348static int lm75_detach_client(struct i2c_client *client) 293static struct i2c_driver lm75_driver = {
349{ 294 .class = I2C_CLASS_HWMON,
350 lm75_remove(client);
351 i2c_detach_client(client);
352 kfree(client);
353 return 0;
354}
355
356static struct i2c_driver lm75_legacy_driver = {
357 .driver = { 295 .driver = {
358 .name = "lm75_legacy", 296 .name = "lm75",
359 }, 297 },
360 .attach_adapter = lm75_attach_adapter, 298 .probe = lm75_probe,
361 .detach_client = lm75_detach_client, 299 .remove = lm75_remove,
300 .id_table = lm75_ids,
301 .detect = lm75_detect,
302 .address_data = &addr_data,
362}; 303};
363 304
364/*-----------------------------------------------------------------------*/ 305/*-----------------------------------------------------------------------*/
@@ -424,22 +365,11 @@ static struct lm75_data *lm75_update_device(struct device *dev)
424 365
425static int __init sensors_lm75_init(void) 366static int __init sensors_lm75_init(void)
426{ 367{
427 int status; 368 return i2c_add_driver(&lm75_driver);
428
429 status = i2c_add_driver(&lm75_driver);
430 if (status < 0)
431 return status;
432
433 status = i2c_add_driver(&lm75_legacy_driver);
434 if (status < 0)
435 i2c_del_driver(&lm75_driver);
436
437 return status;
438} 369}
439 370
440static void __exit sensors_lm75_exit(void) 371static void __exit sensors_lm75_exit(void)
441{ 372{
442 i2c_del_driver(&lm75_legacy_driver);
443 i2c_del_driver(&lm75_driver); 373 i2c_del_driver(&lm75_driver);
444} 374}
445 375
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index daa7d121483b..de21142d106c 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1055,9 +1055,10 @@ static int w83791d_probe(struct i2c_client *client,
1055{ 1055{
1056 struct w83791d_data *data; 1056 struct w83791d_data *data;
1057 struct device *dev = &client->dev; 1057 struct device *dev = &client->dev;
1058 int i, val1, err; 1058 int i, err;
1059 1059
1060#ifdef DEBUG 1060#ifdef DEBUG
1061 int val1;
1061 val1 = w83791d_read(client, W83791D_REG_DID_VID4); 1062 val1 = w83791d_read(client, W83791D_REG_DID_VID4);
1062 dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n", 1063 dev_dbg(dev, "Device ID version: %d.%d (0x%02x)\n",
1063 (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1); 1064 (val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 96867347bcbf..711ca08ab776 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -38,6 +38,20 @@ config I2C_CHARDEV
38 This support is also available as a module. If so, the module 38 This support is also available as a module. If so, the module
39 will be called i2c-dev. 39 will be called i2c-dev.
40 40
41config I2C_HELPER_AUTO
42 bool "Autoselect pertinent helper modules"
43 default y
44 help
45 Some I2C bus drivers require so-called "I2C algorithm" modules
46 to work. These are basically software-only abstractions of generic
47 I2C interfaces. This option will autoselect them so that you don't
48 have to care.
49
50 Unselect this only if you need to enable additional helper
51 modules, for example for use with external I2C bus drivers.
52
53 In doubt, say Y.
54
41source drivers/i2c/algos/Kconfig 55source drivers/i2c/algos/Kconfig
42source drivers/i2c/busses/Kconfig 56source drivers/i2c/busses/Kconfig
43source drivers/i2c/chips/Kconfig 57source drivers/i2c/chips/Kconfig
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 7137a17402fe..b788579b8227 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -2,15 +2,20 @@
2# I2C algorithm drivers configuration 2# I2C algorithm drivers configuration
3# 3#
4 4
5menu "I2C Algorithms"
6 depends on !I2C_HELPER_AUTO
7
5config I2C_ALGOBIT 8config I2C_ALGOBIT
6 tristate 9 tristate "I2C bit-banging interfaces"
7 10
8config I2C_ALGOPCF 11config I2C_ALGOPCF
9 tristate 12 tristate "I2C PCF 8584 interfaces"
10 13
11config I2C_ALGOPCA 14config I2C_ALGOPCA
12 tristate 15 tristate "I2C PCA 9564 interfaces"
13 16
14config I2C_ALGO_SGI 17config I2C_ALGO_SGI
15 tristate 18 tristate
16 depends on SGI_IP22 || SGI_IP32 || X86_VISWS 19 depends on SGI_IP22 || SGI_IP32 || X86_VISWS
20
21endmenu
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 72872d1e63ef..8ba2bcf727d3 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -155,6 +155,9 @@ static int __init amd756_s4882_init(void)
155 int i, error; 155 int i, error;
156 union i2c_smbus_data ioconfig; 156 union i2c_smbus_data ioconfig;
157 157
158 if (!amd756_smbus.dev.parent)
159 return -ENODEV;
160
158 /* Configure the PCA9556 multiplexer */ 161 /* Configure the PCA9556 multiplexer */
159 ioconfig.byte = 0x00; /* All I/O to output mode */ 162 ioconfig.byte = 0x00; /* All I/O to output mode */
160 error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, 163 error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
@@ -168,11 +171,7 @@ static int __init amd756_s4882_init(void)
168 /* Unregister physical bus */ 171 /* Unregister physical bus */
169 error = i2c_del_adapter(&amd756_smbus); 172 error = i2c_del_adapter(&amd756_smbus);
170 if (error) { 173 if (error) {
171 if (error == -EINVAL) 174 dev_err(&amd756_smbus.dev, "Physical bus removal failed\n");
172 error = -ENODEV;
173 else
174 dev_err(&amd756_smbus.dev, "Physical bus removal "
175 "failed\n");
176 goto ERROR0; 175 goto ERROR0;
177 } 176 }
178 177
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index d1a4cbcf2aa4..29015eb9ca46 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -150,6 +150,9 @@ static int __init nforce2_s4985_init(void)
150 int i, error; 150 int i, error;
151 union i2c_smbus_data ioconfig; 151 union i2c_smbus_data ioconfig;
152 152
153 if (!nforce2_smbus)
154 return -ENODEV;
155
153 /* Configure the PCA9556 multiplexer */ 156 /* Configure the PCA9556 multiplexer */
154 ioconfig.byte = 0x00; /* All I/O to output mode */ 157 ioconfig.byte = 0x00; /* All I/O to output mode */
155 error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, 158 error = i2c_smbus_xfer(nforce2_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03,
@@ -161,8 +164,6 @@ static int __init nforce2_s4985_init(void)
161 } 164 }
162 165
163 /* Unregister physical bus */ 166 /* Unregister physical bus */
164 if (!nforce2_smbus)
165 return -ENODEV;
166 error = i2c_del_adapter(nforce2_smbus); 167 error = i2c_del_adapter(nforce2_smbus);
167 if (error) { 168 if (error) {
168 dev_err(&nforce2_smbus->dev, "Physical bus removal failed\n"); 169 dev_err(&nforce2_smbus->dev, "Physical bus removal failed\n");
diff --git a/drivers/i2c/chips/at24.c b/drivers/i2c/chips/at24.c
index e764c94f3e3d..2a4acb269569 100644
--- a/drivers/i2c/chips/at24.c
+++ b/drivers/i2c/chips/at24.c
@@ -188,7 +188,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
188 count = I2C_SMBUS_BLOCK_MAX; 188 count = I2C_SMBUS_BLOCK_MAX;
189 status = i2c_smbus_read_i2c_block_data(client, offset, 189 status = i2c_smbus_read_i2c_block_data(client, offset,
190 count, buf); 190 count, buf);
191 dev_dbg(&client->dev, "smbus read %zd@%d --> %d\n", 191 dev_dbg(&client->dev, "smbus read %zu@%d --> %d\n",
192 count, offset, status); 192 count, offset, status);
193 return (status < 0) ? -EIO : status; 193 return (status < 0) ? -EIO : status;
194 } 194 }
@@ -214,7 +214,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
214 msg[1].len = count; 214 msg[1].len = count;
215 215
216 status = i2c_transfer(client->adapter, msg, 2); 216 status = i2c_transfer(client->adapter, msg, 2);
217 dev_dbg(&client->dev, "i2c read %zd@%d --> %d\n", 217 dev_dbg(&client->dev, "i2c read %zu@%d --> %d\n",
218 count, offset, status); 218 count, offset, status);
219 219
220 if (status == 2) 220 if (status == 2)
@@ -334,7 +334,7 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf,
334 if (status == 1) 334 if (status == 1)
335 status = count; 335 status = count;
336 } 336 }
337 dev_dbg(&client->dev, "write %zd@%d --> %zd (%ld)\n", 337 dev_dbg(&client->dev, "write %zu@%d --> %zd (%ld)\n",
338 count, offset, status, jiffies); 338 count, offset, status, jiffies);
339 339
340 if (status == count) 340 if (status == count)
@@ -512,7 +512,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
512 512
513 i2c_set_clientdata(client, at24); 513 i2c_set_clientdata(client, at24);
514 514
515 dev_info(&client->dev, "%Zd byte %s EEPROM %s\n", 515 dev_info(&client->dev, "%zu byte %s EEPROM %s\n",
516 at24->bin.size, client->name, 516 at24->bin.size, client->name,
517 writable ? "(writable)" : "(read-only)"); 517 writable ? "(writable)" : "(read-only)");
518 dev_dbg(&client->dev, 518 dev_dbg(&client->dev,
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 18355ae2155d..4655b794ebe3 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1593,7 +1593,7 @@ fail1:
1593 if (machine_is_omap_h2()) { 1593 if (machine_is_omap_h2()) {
1594 /* full speed signaling by default */ 1594 /* full speed signaling by default */
1595 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, 1595 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
1596 MC1_SPEED_REG); 1596 MC1_SPEED);
1597 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, 1597 isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
1598 MC2_SPD_SUSP_CTRL); 1598 MC2_SPD_SUSP_CTRL);
1599 1599
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 7bf38c418086..550853f79ae8 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -813,7 +813,12 @@ static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
813int i2c_attach_client(struct i2c_client *client) 813int i2c_attach_client(struct i2c_client *client)
814{ 814{
815 struct i2c_adapter *adapter = client->adapter; 815 struct i2c_adapter *adapter = client->adapter;
816 int res = 0; 816 int res;
817
818 /* Check for address business */
819 res = i2c_check_addr(adapter, client->addr);
820 if (res)
821 return res;
817 822
818 client->dev.parent = &client->adapter->dev; 823 client->dev.parent = &client->adapter->dev;
819 client->dev.bus = &i2c_bus_type; 824 client->dev.bus = &i2c_bus_type;
@@ -1451,9 +1456,11 @@ i2c_new_probed_device(struct i2c_adapter *adap,
1451 if ((addr_list[i] & ~0x07) == 0x30 1456 if ((addr_list[i] & ~0x07) == 0x30
1452 || (addr_list[i] & ~0x0f) == 0x50 1457 || (addr_list[i] & ~0x0f) == 0x50
1453 || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) { 1458 || !i2c_check_functionality(adap, I2C_FUNC_SMBUS_QUICK)) {
1459 union i2c_smbus_data data;
1460
1454 if (i2c_smbus_xfer(adap, addr_list[i], 0, 1461 if (i2c_smbus_xfer(adap, addr_list[i], 0,
1455 I2C_SMBUS_READ, 0, 1462 I2C_SMBUS_READ, 0,
1456 I2C_SMBUS_BYTE, NULL) >= 0) 1463 I2C_SMBUS_BYTE, &data) >= 0)
1457 break; 1464 break;
1458 } else { 1465 } else {
1459 if (i2c_smbus_xfer(adap, addr_list[i], 0, 1466 if (i2c_smbus_xfer(adap, addr_list[i], 0,
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 9d55c6383b23..af4491fa7e34 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -147,7 +147,7 @@ static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
147 if (tmp==NULL) 147 if (tmp==NULL)
148 return -ENOMEM; 148 return -ENOMEM;
149 149
150 pr_debug("i2c-dev: i2c-%d reading %zd bytes.\n", 150 pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
151 iminor(file->f_path.dentry->d_inode), count); 151 iminor(file->f_path.dentry->d_inode), count);
152 152
153 ret = i2c_master_recv(client,tmp,count); 153 ret = i2c_master_recv(client,tmp,count);
@@ -175,7 +175,7 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
175 return -EFAULT; 175 return -EFAULT;
176 } 176 }
177 177
178 pr_debug("i2c-dev: i2c-%d writing %zd bytes.\n", 178 pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
179 iminor(file->f_path.dentry->d_inode), count); 179 iminor(file->f_path.dentry->d_inode), count);
180 180
181 ret = i2c_master_send(client,tmp,count); 181 ret = i2c_master_send(client,tmp,count);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 89a112d513ad..49a8c589e346 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1272,9 +1272,9 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
1272 */ 1272 */
1273static void msf_from_bcd(struct atapi_msf *msf) 1273static void msf_from_bcd(struct atapi_msf *msf)
1274{ 1274{
1275 msf->minute = BCD2BIN(msf->minute); 1275 msf->minute = bcd2bin(msf->minute);
1276 msf->second = BCD2BIN(msf->second); 1276 msf->second = bcd2bin(msf->second);
1277 msf->frame = BCD2BIN(msf->frame); 1277 msf->frame = bcd2bin(msf->frame);
1278} 1278}
1279 1279
1280int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) 1280int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
@@ -1415,8 +1415,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1415 return stat; 1415 return stat;
1416 1416
1417 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1417 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1418 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1418 toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
1419 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1419 toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
1420 } 1420 }
1421 1421
1422 ntracks = toc->hdr.last_track - toc->hdr.first_track + 1; 1422 ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
@@ -1456,8 +1456,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1456 return stat; 1456 return stat;
1457 1457
1458 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1458 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1459 toc->hdr.first_track = (u8)BIN2BCD(CDROM_LEADOUT); 1459 toc->hdr.first_track = (u8)bin2bcd(CDROM_LEADOUT);
1460 toc->hdr.last_track = (u8)BIN2BCD(CDROM_LEADOUT); 1460 toc->hdr.last_track = (u8)bin2bcd(CDROM_LEADOUT);
1461 } else { 1461 } else {
1462 toc->hdr.first_track = CDROM_LEADOUT; 1462 toc->hdr.first_track = CDROM_LEADOUT;
1463 toc->hdr.last_track = CDROM_LEADOUT; 1463 toc->hdr.last_track = CDROM_LEADOUT;
@@ -1470,14 +1470,14 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
1470 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length); 1470 toc->hdr.toc_length = be16_to_cpu(toc->hdr.toc_length);
1471 1471
1472 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) { 1472 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) {
1473 toc->hdr.first_track = BCD2BIN(toc->hdr.first_track); 1473 toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
1474 toc->hdr.last_track = BCD2BIN(toc->hdr.last_track); 1474 toc->hdr.last_track = bcd2bin(toc->hdr.last_track);
1475 } 1475 }
1476 1476
1477 for (i = 0; i <= ntracks; i++) { 1477 for (i = 0; i <= ntracks; i++) {
1478 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { 1478 if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) {
1479 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD) 1479 if (drive->atapi_flags & IDE_AFLAG_TOCTRACKS_AS_BCD)
1480 toc->ent[i].track = BCD2BIN(toc->ent[i].track); 1480 toc->ent[i].track = bcd2bin(toc->ent[i].track);
1481 msf_from_bcd(&toc->ent[i].addr.msf); 1481 msf_from_bcd(&toc->ent[i].addr.msf);
1482 } 1482 }
1483 toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute, 1483 toc->ent[i].addr.lba = msf_to_lba(toc->ent[i].addr.msf.minute,
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index 40644b6f1c00..3187215e8f89 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -307,7 +307,7 @@ static struct pci_driver driver = {
307 .name = "AEC62xx_IDE", 307 .name = "AEC62xx_IDE",
308 .id_table = aec62xx_pci_tbl, 308 .id_table = aec62xx_pci_tbl,
309 .probe = aec62xx_init_one, 309 .probe = aec62xx_init_one,
310 .remove = aec62xx_remove, 310 .remove = __devexit_p(aec62xx_remove),
311}; 311};
312 312
313static int __init aec62xx_ide_init(void) 313static int __init aec62xx_ide_init(void)
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index bfae2f882f48..e6d8ee88d56d 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -447,7 +447,7 @@ static struct pci_driver driver = {
447 .name = "Cypress_IDE", 447 .name = "Cypress_IDE",
448 .id_table = cy82c693_pci_tbl, 448 .id_table = cy82c693_pci_tbl,
449 .probe = cy82c693_init_one, 449 .probe = cy82c693_init_one,
450 .remove = cy82c693_remove, 450 .remove = __devexit_p(cy82c693_remove),
451}; 451};
452 452
453static int __init cy82c693_ide_init(void) 453static int __init cy82c693_ide_init(void)
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 748793a413ab..eb107eef0dbc 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1620,7 +1620,7 @@ static struct pci_driver driver = {
1620 .name = "HPT366_IDE", 1620 .name = "HPT366_IDE",
1621 .id_table = hpt366_pci_tbl, 1621 .id_table = hpt366_pci_tbl,
1622 .probe = hpt366_init_one, 1622 .probe = hpt366_init_one,
1623 .remove = hpt366_remove, 1623 .remove = __devexit_p(hpt366_remove),
1624}; 1624};
1625 1625
1626static int __init hpt366_ide_init(void) 1626static int __init hpt366_ide_init(void)
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index b6dc723de702..4a1508a707cc 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -686,7 +686,7 @@ static struct pci_driver driver = {
686 .name = "ITE821x IDE", 686 .name = "ITE821x IDE",
687 .id_table = it821x_pci_tbl, 687 .id_table = it821x_pci_tbl,
688 .probe = it821x_init_one, 688 .probe = it821x_init_one,
689 .remove = it821x_remove, 689 .remove = __devexit_p(it821x_remove),
690}; 690};
691 691
692static int __init it821x_ide_init(void) 692static int __init it821x_ide_init(void)
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 0f609b72f470..d477da6b5858 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -566,7 +566,7 @@ static struct pci_driver driver = {
566 .name = "Promise_IDE", 566 .name = "Promise_IDE",
567 .id_table = pdc202new_pci_tbl, 567 .id_table = pdc202new_pci_tbl,
568 .probe = pdc202new_init_one, 568 .probe = pdc202new_init_one,
569 .remove = pdc202new_remove, 569 .remove = __devexit_p(pdc202new_remove),
570}; 570};
571 571
572static int __init pdc202new_ide_init(void) 572static int __init pdc202new_ide_init(void)
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 6cde48bba6f8..44cccd1e086a 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -954,7 +954,7 @@ static struct pci_driver driver = {
954 .name = "SCC IDE", 954 .name = "SCC IDE",
955 .id_table = scc_pci_tbl, 955 .id_table = scc_pci_tbl,
956 .probe = scc_init_one, 956 .probe = scc_init_one,
957 .remove = scc_remove, 957 .remove = __devexit_p(scc_remove),
958}; 958};
959 959
960static int scc_ide_init(void) 960static int scc_ide_init(void)
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 42eef19a18f1..681306c9d79b 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -621,9 +621,9 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
621 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE, 621 if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
622 DRV_NAME)) { 622 DRV_NAME)) {
623 printk(KERN_ERR 623 printk(KERN_ERR
624 "%s : %s -- ERROR, Addresses " 624 "%s %s: -- ERROR, Addresses "
625 "0x%p to 0x%p ALREADY in use\n", 625 "0x%p to 0x%p ALREADY in use\n",
626 __func__, DRV_NAME, (void *) cmd_phys_base, 626 DRV_NAME, pci_name(dev), (void *)cmd_phys_base,
627 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE); 627 (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
628 return -ENOMEM; 628 return -ENOMEM;
629 } 629 }
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 445ce6fbea33..db2b88a369ab 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -832,7 +832,7 @@ static struct pci_driver driver = {
832 .name = "SiI_IDE", 832 .name = "SiI_IDE",
833 .id_table = siimage_pci_tbl, 833 .id_table = siimage_pci_tbl,
834 .probe = siimage_init_one, 834 .probe = siimage_init_one,
835 .remove = siimage_remove, 835 .remove = __devexit_p(siimage_remove),
836}; 836};
837 837
838static int __init siimage_ide_init(void) 838static int __init siimage_ide_init(void)
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index e5a4b42b4e33..5efe21d6ef97 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -610,7 +610,7 @@ static struct pci_driver driver = {
610 .name = "SIS_IDE", 610 .name = "SIS_IDE",
611 .id_table = sis5513_pci_tbl, 611 .id_table = sis5513_pci_tbl,
612 .probe = sis5513_init_one, 612 .probe = sis5513_init_one,
613 .remove = sis5513_remove, 613 .remove = __devexit_p(sis5513_remove),
614}; 614};
615 615
616static int __init sis5513_ide_init(void) 616static int __init sis5513_ide_init(void)
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index 7fc88c375e5d..927277c54ec9 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -249,7 +249,7 @@ static struct pci_driver driver = {
249 .name = "TC86C001", 249 .name = "TC86C001",
250 .id_table = tc86c001_pci_tbl, 250 .id_table = tc86c001_pci_tbl,
251 .probe = tc86c001_init_one, 251 .probe = tc86c001_init_one,
252 .remove = tc86c001_remove, 252 .remove = __devexit_p(tc86c001_remove),
253}; 253};
254 254
255static int __init tc86c001_ide_init(void) 255static int __init tc86c001_ide_init(void)
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index a6b2cc83f293..94fb9ab3223f 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -491,7 +491,7 @@ static struct pci_driver driver = {
491 .name = "VIA_IDE", 491 .name = "VIA_IDE",
492 .id_table = via_pci_tbl, 492 .id_table = via_pci_tbl,
493 .probe = via_init_one, 493 .probe = via_init_one,
494 .remove = via_remove, 494 .remove = __devexit_p(via_remove),
495}; 495};
496 496
497static int __init via_ide_init(void) 497static int __init via_ide_init(void)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0b0618edd645..1ab919f836a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm {
156 156
157#define EHCA_MOD_QP_PARM_MAX 4 157#define EHCA_MOD_QP_PARM_MAX 4
158 158
159#define QMAP_IDX_MASK 0xFFFFULL
160
161/* struct for tracking if cqes have been reported to the application */
162struct ehca_qmap_entry {
163 u16 app_wr_id;
164 u16 reported;
165};
166
159struct ehca_qp { 167struct ehca_qp {
160 union { 168 union {
161 struct ib_qp ib_qp; 169 struct ib_qp ib_qp;
@@ -165,6 +173,7 @@ struct ehca_qp {
165 enum ehca_ext_qp_type ext_type; 173 enum ehca_ext_qp_type ext_type;
166 enum ib_qp_state state; 174 enum ib_qp_state state;
167 struct ipz_queue ipz_squeue; 175 struct ipz_queue ipz_squeue;
176 struct ehca_qmap_entry *sq_map;
168 struct ipz_queue ipz_rqueue; 177 struct ipz_queue ipz_rqueue;
169 struct h_galpas galpas; 178 struct h_galpas galpas;
170 u32 qkey; 179 u32 qkey;
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h
index 818803057ebf..5d28e3e98a20 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/infiniband/hw/ehca/ehca_qes.h
@@ -213,6 +213,7 @@ struct ehca_wqe {
213#define WC_STATUS_ERROR_BIT 0x80000000 213#define WC_STATUS_ERROR_BIT 0x80000000
214#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 214#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
215#define WC_STATUS_PURGE_BIT 0x10 215#define WC_STATUS_PURGE_BIT 0x10
216#define WC_SEND_RECEIVE_BIT 0x80
216 217
217struct ehca_cqe { 218struct ehca_cqe {
218 u64 work_request_id; 219 u64 work_request_id;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index ea13efddf175..b6bcee036734 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp(
412 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, 412 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
413 ib_device); 413 ib_device);
414 struct ib_ucontext *context = NULL; 414 struct ib_ucontext *context = NULL;
415 u32 nr_qes;
415 u64 h_ret; 416 u64 h_ret;
416 int is_llqp = 0, has_srq = 0; 417 int is_llqp = 0, has_srq = 0;
417 int qp_type, max_send_sge, max_recv_sge, ret; 418 int qp_type, max_send_sge, max_recv_sge, ret;
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp(
715 "and pages ret=%i", ret); 716 "and pages ret=%i", ret);
716 goto create_qp_exit2; 717 goto create_qp_exit2;
717 } 718 }
719 nr_qes = my_qp->ipz_squeue.queue_length /
720 my_qp->ipz_squeue.qe_size;
721 my_qp->sq_map = vmalloc(nr_qes *
722 sizeof(struct ehca_qmap_entry));
723 if (!my_qp->sq_map) {
724 ehca_err(pd->device, "Couldn't allocate squeue "
725 "map ret=%i", ret);
726 goto create_qp_exit3;
727 }
718 } 728 }
719 729
720 if (HAS_RQ(my_qp)) { 730 if (HAS_RQ(my_qp)) {
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp(
724 if (ret) { 734 if (ret) {
725 ehca_err(pd->device, "Couldn't initialize rqueue " 735 ehca_err(pd->device, "Couldn't initialize rqueue "
726 "and pages ret=%i", ret); 736 "and pages ret=%i", ret);
727 goto create_qp_exit3; 737 goto create_qp_exit4;
728 } 738 }
729 } 739 }
730 740
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp(
770 if (!my_qp->mod_qp_parm) { 780 if (!my_qp->mod_qp_parm) {
771 ehca_err(pd->device, 781 ehca_err(pd->device,
772 "Could not alloc mod_qp_parm"); 782 "Could not alloc mod_qp_parm");
773 goto create_qp_exit4; 783 goto create_qp_exit5;
774 } 784 }
775 } 785 }
776 } 786 }
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp(
780 h_ret = ehca_define_sqp(shca, my_qp, init_attr); 790 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
781 if (h_ret != H_SUCCESS) { 791 if (h_ret != H_SUCCESS) {
782 ret = ehca2ib_return_code(h_ret); 792 ret = ehca2ib_return_code(h_ret);
783 goto create_qp_exit5; 793 goto create_qp_exit6;
784 } 794 }
785 } 795 }
786 796
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp(
789 if (ret) { 799 if (ret) {
790 ehca_err(pd->device, 800 ehca_err(pd->device,
791 "Couldn't assign qp to send_cq ret=%i", ret); 801 "Couldn't assign qp to send_cq ret=%i", ret);
792 goto create_qp_exit5; 802 goto create_qp_exit6;
793 } 803 }
794 } 804 }
795 805
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp(
815 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 825 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
816 ehca_err(pd->device, "Copy to udata failed"); 826 ehca_err(pd->device, "Copy to udata failed");
817 ret = -EINVAL; 827 ret = -EINVAL;
818 goto create_qp_exit6; 828 goto create_qp_exit7;
819 } 829 }
820 } 830 }
821 831
822 return my_qp; 832 return my_qp;
823 833
824create_qp_exit6: 834create_qp_exit7:
825 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); 835 ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
826 836
827create_qp_exit5: 837create_qp_exit6:
828 kfree(my_qp->mod_qp_parm); 838 kfree(my_qp->mod_qp_parm);
829 839
830create_qp_exit4: 840create_qp_exit5:
831 if (HAS_RQ(my_qp)) 841 if (HAS_RQ(my_qp))
832 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 842 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
833 843
844create_qp_exit4:
845 if (HAS_SQ(my_qp))
846 vfree(my_qp->sq_map);
847
834create_qp_exit3: 848create_qp_exit3:
835 if (HAS_SQ(my_qp)) 849 if (HAS_SQ(my_qp))
836 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 850 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
@@ -1534,8 +1548,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1534 if (attr_mask & IB_QP_QKEY) 1548 if (attr_mask & IB_QP_QKEY)
1535 my_qp->qkey = attr->qkey; 1549 my_qp->qkey = attr->qkey;
1536 1550
1537 my_qp->state = qp_new_state;
1538
1539modify_qp_exit2: 1551modify_qp_exit2:
1540 if (squeue_locked) { /* this means: sqe -> rts */ 1552 if (squeue_locked) { /* this means: sqe -> rts */
1541 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 1553 spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1551,6 +1563,8 @@ modify_qp_exit1:
1551int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, 1563int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1552 struct ib_udata *udata) 1564 struct ib_udata *udata)
1553{ 1565{
1566 int ret = 0;
1567
1554 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, 1568 struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
1555 ib_device); 1569 ib_device);
1556 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); 1570 struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
@@ -1597,12 +1611,18 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1597 attr->qp_state, my_qp->init_attr.port_num, 1611 attr->qp_state, my_qp->init_attr.port_num,
1598 ibqp->qp_type); 1612 ibqp->qp_type);
1599 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); 1613 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1600 return 0; 1614 goto out;
1601 } 1615 }
1602 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags); 1616 spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
1603 } 1617 }
1604 1618
1605 return internal_modify_qp(ibqp, attr, attr_mask, 0); 1619 ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
1620
1621out:
1622 if ((ret == 0) && (attr_mask & IB_QP_STATE))
1623 my_qp->state = attr->qp_state;
1624
1625 return ret;
1606} 1626}
1607 1627
1608void ehca_recover_sqp(struct ib_qp *sqp) 1628void ehca_recover_sqp(struct ib_qp *sqp)
@@ -1973,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
1973 1993
1974 if (HAS_RQ(my_qp)) 1994 if (HAS_RQ(my_qp))
1975 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); 1995 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
1976 if (HAS_SQ(my_qp)) 1996 if (HAS_SQ(my_qp)) {
1977 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); 1997 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
1998 vfree(my_qp->sq_map);
1999 }
1978 kmem_cache_free(qp_cache, my_qp); 2000 kmem_cache_free(qp_cache, my_qp);
1979 atomic_dec(&shca->num_qps); 2001 atomic_dec(&shca->num_qps);
1980 return 0; 2002 return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 898c8b5c38dd..4426d82fe798 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
139static inline int ehca_write_swqe(struct ehca_qp *qp, 139static inline int ehca_write_swqe(struct ehca_qp *qp,
140 struct ehca_wqe *wqe_p, 140 struct ehca_wqe *wqe_p,
141 const struct ib_send_wr *send_wr, 141 const struct ib_send_wr *send_wr,
142 u32 sq_map_idx,
142 int hidden) 143 int hidden)
143{ 144{
144 u32 idx; 145 u32 idx;
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
157 /* clear wqe header until sglist */ 158 /* clear wqe header until sglist */
158 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); 159 memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
159 160
160 wqe_p->work_request_id = send_wr->wr_id; 161 wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
162 wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
163
164 qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
165 qp->sq_map[sq_map_idx].reported = 0;
161 166
162 switch (send_wr->opcode) { 167 switch (send_wr->opcode) {
163 case IB_WR_SEND: 168 case IB_WR_SEND:
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp,
381{ 386{
382 struct ehca_wqe *wqe_p; 387 struct ehca_wqe *wqe_p;
383 int ret; 388 int ret;
389 u32 sq_map_idx;
384 u64 start_offset = my_qp->ipz_squeue.current_q_offset; 390 u64 start_offset = my_qp->ipz_squeue.current_q_offset;
385 391
386 /* get pointer next to free WQE */ 392 /* get pointer next to free WQE */
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp,
393 "qp_num=%x", my_qp->ib_qp.qp_num); 399 "qp_num=%x", my_qp->ib_qp.qp_num);
394 return -ENOMEM; 400 return -ENOMEM;
395 } 401 }
402
403 /*
404 * Get the index of the WQE in the send queue. The same index is used
405 * for writing into the sq_map.
406 */
407 sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
408
396 /* write a SEND WQE into the QUEUE */ 409 /* write a SEND WQE into the QUEUE */
397 ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden); 410 ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
398 /* 411 /*
399 * if something failed, 412 * if something failed,
400 * reset the free entry pointer to the start value 413 * reset the free entry pointer to the start value
@@ -589,7 +602,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
589 struct ehca_qp *my_qp; 602 struct ehca_qp *my_qp;
590 int cqe_count = 0, is_error; 603 int cqe_count = 0, is_error;
591 604
592poll_cq_one_read_cqe: 605repoll:
593 cqe = (struct ehca_cqe *) 606 cqe = (struct ehca_cqe *)
594 ipz_qeit_get_inc_valid(&my_cq->ipz_queue); 607 ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
595 if (!cqe) { 608 if (!cqe) {
@@ -617,7 +630,7 @@ poll_cq_one_read_cqe:
617 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", 630 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
618 my_cq->cq_number, cqe->local_qp_number); 631 my_cq->cq_number, cqe->local_qp_number);
619 /* ignore this purged cqe */ 632 /* ignore this purged cqe */
620 goto poll_cq_one_read_cqe; 633 goto repoll;
621 } 634 }
622 spin_lock_irqsave(&qp->spinlock_s, flags); 635 spin_lock_irqsave(&qp->spinlock_s, flags);
623 purgeflag = qp->sqerr_purgeflag; 636 purgeflag = qp->sqerr_purgeflag;
@@ -636,7 +649,7 @@ poll_cq_one_read_cqe:
636 * that caused sqe and turn off purge flag 649 * that caused sqe and turn off purge flag
637 */ 650 */
638 qp->sqerr_purgeflag = 0; 651 qp->sqerr_purgeflag = 0;
639 goto poll_cq_one_read_cqe; 652 goto repoll;
640 } 653 }
641 } 654 }
642 655
@@ -654,8 +667,34 @@ poll_cq_one_read_cqe:
654 my_cq, my_cq->cq_number); 667 my_cq, my_cq->cq_number);
655 } 668 }
656 669
657 /* we got a completion! */ 670 read_lock(&ehca_qp_idr_lock);
658 wc->wr_id = cqe->work_request_id; 671 my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
672 read_unlock(&ehca_qp_idr_lock);
673 if (!my_qp)
674 goto repoll;
675 wc->qp = &my_qp->ib_qp;
676
677 if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
678 struct ehca_qmap_entry *qmap_entry;
679 /*
680 * We got a send completion and need to restore the original
681 * wr_id.
682 */
683 qmap_entry = &my_qp->sq_map[cqe->work_request_id &
684 QMAP_IDX_MASK];
685
686 if (qmap_entry->reported) {
687 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
688 my_qp->real_qp_num);
689 /* found a double cqe, discard it and read next one */
690 goto repoll;
691 }
692 wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
693 wc->wr_id |= qmap_entry->app_wr_id;
694 qmap_entry->reported = 1;
695 } else
696 /* We got a receive completion. */
697 wc->wr_id = cqe->work_request_id;
659 698
660 /* eval ib_wc_opcode */ 699 /* eval ib_wc_opcode */
661 wc->opcode = ib_wc_opcode[cqe->optype]-1; 700 wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -667,7 +706,7 @@ poll_cq_one_read_cqe:
667 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", 706 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
668 my_cq, my_cq->cq_number); 707 my_cq, my_cq->cq_number);
669 /* update also queue adder to throw away this entry!!! */ 708 /* update also queue adder to throw away this entry!!! */
670 goto poll_cq_one_exit0; 709 goto repoll;
671 } 710 }
672 711
673 /* eval ib_wc_status */ 712 /* eval ib_wc_status */
@@ -678,11 +717,6 @@ poll_cq_one_read_cqe:
678 } else 717 } else
679 wc->status = IB_WC_SUCCESS; 718 wc->status = IB_WC_SUCCESS;
680 719
681 read_lock(&ehca_qp_idr_lock);
682 my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
683 wc->qp = &my_qp->ib_qp;
684 read_unlock(&ehca_qp_idr_lock);
685
686 wc->byte_len = cqe->nr_bytes_transferred; 720 wc->byte_len = cqe->nr_bytes_transferred;
687 wc->pkey_index = cqe->pkey_index; 721 wc->pkey_index = cqe->pkey_index;
688 wc->slid = cqe->rlid; 722 wc->slid = cqe->rlid;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7ebc400a4b3d..341ffedafed6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -202,7 +202,7 @@ static void ipoib_cm_free_rx_ring(struct net_device *dev,
202 dev_kfree_skb_any(rx_ring[i].skb); 202 dev_kfree_skb_any(rx_ring[i].skb);
203 } 203 }
204 204
205 kfree(rx_ring); 205 vfree(rx_ring);
206} 206}
207 207
208static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) 208static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
@@ -352,9 +352,14 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
352 int ret; 352 int ret;
353 int i; 353 int i;
354 354
355 rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL); 355 rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
356 if (!rx->rx_ring) 356 if (!rx->rx_ring) {
357 printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
358 priv->ca->name, ipoib_recvq_size);
357 return -ENOMEM; 359 return -ENOMEM;
360 }
361
362 memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
358 363
359 t = kmalloc(sizeof *t, GFP_KERNEL); 364 t = kmalloc(sizeof *t, GFP_KERNEL);
360 if (!t) { 365 if (!t) {
@@ -1494,14 +1499,16 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1494 return; 1499 return;
1495 } 1500 }
1496 1501
1497 priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, 1502 priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1498 GFP_KERNEL);
1499 if (!priv->cm.srq_ring) { 1503 if (!priv->cm.srq_ring) {
1500 printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n", 1504 printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
1501 priv->ca->name, ipoib_recvq_size); 1505 priv->ca->name, ipoib_recvq_size);
1502 ib_destroy_srq(priv->cm.srq); 1506 ib_destroy_srq(priv->cm.srq);
1503 priv->cm.srq = NULL; 1507 priv->cm.srq = NULL;
1508 return;
1504 } 1509 }
1510
1511 memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1505} 1512}
1506 1513
1507int ipoib_cm_dev_init(struct net_device *dev) 1514int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2d65411f6763..a92d81567559 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -647,6 +647,47 @@ static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
647 return copy_to_user(p, str, len) ? -EFAULT : len; 647 return copy_to_user(p, str, len) ? -EFAULT : len;
648} 648}
649 649
650#define OLD_KEY_MAX 0x1ff
651static int handle_eviocgbit(struct input_dev *dev, unsigned int cmd, void __user *p, int compat_mode)
652{
653 static unsigned long keymax_warn_time;
654 unsigned long *bits;
655 int len;
656
657 switch (_IOC_NR(cmd) & EV_MAX) {
658
659 case 0: bits = dev->evbit; len = EV_MAX; break;
660 case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
661 case EV_REL: bits = dev->relbit; len = REL_MAX; break;
662 case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
663 case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
664 case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
665 case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
666 case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
667 case EV_SW: bits = dev->swbit; len = SW_MAX; break;
668 default: return -EINVAL;
669 }
670
671 /*
672 * Work around bugs in userspace programs that like to do
673 * EVIOCGBIT(EV_KEY, KEY_MAX) and not realize that 'len'
674 * should be in bytes, not in bits.
675 */
676 if ((_IOC_NR(cmd) & EV_MAX) == EV_KEY && _IOC_SIZE(cmd) == OLD_KEY_MAX) {
677 len = OLD_KEY_MAX;
678 if (printk_timed_ratelimit(&keymax_warn_time, 10 * 1000))
679 printk(KERN_WARNING
680 "evdev.c(EVIOCGBIT): Suspicious buffer size %d, "
681 "limiting output to %d bytes. See "
682 "http://userweb.kernel.org/~dtor/eviocgbit-bug.html\n",
683 OLD_KEY_MAX,
684 BITS_TO_LONGS(OLD_KEY_MAX) * sizeof(long));
685 }
686
687 return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
688}
689#undef OLD_KEY_MAX
690
650static long evdev_do_ioctl(struct file *file, unsigned int cmd, 691static long evdev_do_ioctl(struct file *file, unsigned int cmd,
651 void __user *p, int compat_mode) 692 void __user *p, int compat_mode)
652{ 693{
@@ -733,26 +774,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
733 774
734 if (_IOC_DIR(cmd) == _IOC_READ) { 775 if (_IOC_DIR(cmd) == _IOC_READ) {
735 776
736 if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0))) { 777 if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
737 778 return handle_eviocgbit(dev, cmd, p, compat_mode);
738 unsigned long *bits;
739 int len;
740
741 switch (_IOC_NR(cmd) & EV_MAX) {
742
743 case 0: bits = dev->evbit; len = EV_MAX; break;
744 case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
745 case EV_REL: bits = dev->relbit; len = REL_MAX; break;
746 case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
747 case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
748 case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
749 case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
750 case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
751 case EV_SW: bits = dev->swbit; len = SW_MAX; break;
752 default: return -EINVAL;
753 }
754 return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
755 }
756 779
757 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0))) 780 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGKEY(0)))
758 return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd), 781 return bits_to_user(dev->key, KEY_MAX, _IOC_SIZE(cmd),
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 87d3e7eabffd..6791be81eb29 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -127,6 +127,7 @@ static const struct xpad_device {
127 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 }, 127 { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
128 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 128 { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
129 { 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, 129 { 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
130 { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", MAP_DPAD_TO_AXES, XTYPE_XBOX },
130 { 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, 131 { 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
131 { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX }, 132 { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX },
132 { 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX }, 133 { 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index be58730e636a..3f48279f2195 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -118,6 +118,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev)
118 unsigned int type = button->type ?: EV_KEY; 118 unsigned int type = button->type ?: EV_KEY;
119 119
120 bdata->input = input; 120 bdata->input = input;
121 bdata->button = button;
121 setup_timer(&bdata->timer, 122 setup_timer(&bdata->timer,
122 gpio_check_button, (unsigned long)bdata); 123 gpio_check_button, (unsigned long)bdata);
123 124
@@ -256,7 +257,7 @@ static int gpio_keys_resume(struct platform_device *pdev)
256#define gpio_keys_resume NULL 257#define gpio_keys_resume NULL
257#endif 258#endif
258 259
259struct platform_driver gpio_keys_device_driver = { 260static struct platform_driver gpio_keys_device_driver = {
260 .probe = gpio_keys_probe, 261 .probe = gpio_keys_probe,
261 .remove = __devexit_p(gpio_keys_remove), 262 .remove = __devexit_p(gpio_keys_remove),
262 .suspend = gpio_keys_suspend, 263 .suspend = gpio_keys_suspend,
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 7bbea097cda2..f996546fc443 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -130,6 +130,29 @@ config MOUSE_APPLETOUCH
130 To compile this driver as a module, choose M here: the 130 To compile this driver as a module, choose M here: the
131 module will be called appletouch. 131 module will be called appletouch.
132 132
133config MOUSE_BCM5974
134 tristate "Apple USB BCM5974 Multitouch trackpad support"
135 depends on USB_ARCH_HAS_HCD
136 select USB
137 help
138 Say Y here if you have an Apple USB BCM5974 Multitouch
139 trackpad.
140
141 The BCM5974 is the multitouch trackpad found in the Macbook
142 Air (JAN2008) and Macbook Pro Penryn (FEB2008) laptops.
143
144 It is also found in the IPhone (2007) and Ipod Touch (2008).
145
146 This driver provides multitouch functionality together with
147 the synaptics X11 driver.
148
149 The interface is currently identical to the appletouch interface,
150 for further information, see
151 <file:Documentation/input/appletouch.txt>.
152
153 To compile this driver as a module, choose M here: the
154 module will be called bcm5974.
155
133config MOUSE_INPORT 156config MOUSE_INPORT
134 tristate "InPort/MS/ATIXL busmouse" 157 tristate "InPort/MS/ATIXL busmouse"
135 depends on ISA 158 depends on ISA
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 9e6e36330820..d4d202516090 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -6,6 +6,7 @@
6 6
7obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o 7obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o
8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o 8obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o
9obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o
9obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o 10obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o
10obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o 11obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
11obj-$(CONFIG_MOUSE_INPORT) += inport.o 12obj-$(CONFIG_MOUSE_INPORT) += inport.o
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
new file mode 100644
index 000000000000..2ec921bf3c60
--- /dev/null
+++ b/drivers/input/mouse/bcm5974.c
@@ -0,0 +1,681 @@
1/*
2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
3 *
4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
5 *
6 * The USB initialization and package decoding was made by
7 * Scott Shawcroft as part of the touchd user-space driver project:
8 * Copyright (C) 2008 Scott Shawcroft (scott.shawcroft@gmail.com)
9 *
10 * The BCM5974 driver is based on the appletouch driver:
11 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
12 * Copyright (C) 2005 Johannes Berg (johannes@sipsolutions.net)
13 * Copyright (C) 2005 Stelian Pop (stelian@popies.net)
14 * Copyright (C) 2005 Frank Arnold (frank@scirocco-5v-turbo.de)
15 * Copyright (C) 2005 Peter Osterlund (petero2@telia.com)
16 * Copyright (C) 2005 Michael Hanselmann (linux-kernel@hansmi.ch)
17 * Copyright (C) 2006 Nicolas Boichat (nicolas@boichat.ch)
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/errno.h>
37#include <linux/init.h>
38#include <linux/slab.h>
39#include <linux/module.h>
40#include <linux/usb/input.h>
41#include <linux/hid.h>
42#include <linux/mutex.h>
43
44#define USB_VENDOR_ID_APPLE 0x05ac
45
46/* MacbookAir, aka wellspring */
47#define USB_DEVICE_ID_APPLE_WELLSPRING_ANSI 0x0223
48#define USB_DEVICE_ID_APPLE_WELLSPRING_ISO 0x0224
49#define USB_DEVICE_ID_APPLE_WELLSPRING_JIS 0x0225
50/* MacbookProPenryn, aka wellspring2 */
51#define USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI 0x0230
52#define USB_DEVICE_ID_APPLE_WELLSPRING2_ISO 0x0231
53#define USB_DEVICE_ID_APPLE_WELLSPRING2_JIS 0x0232
54
55#define BCM5974_DEVICE(prod) { \
56 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
57 USB_DEVICE_ID_MATCH_INT_CLASS | \
58 USB_DEVICE_ID_MATCH_INT_PROTOCOL), \
59 .idVendor = USB_VENDOR_ID_APPLE, \
60 .idProduct = (prod), \
61 .bInterfaceClass = USB_INTERFACE_CLASS_HID, \
62 .bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE \
63}
64
65/* table of devices that work with this driver */
66static const struct usb_device_id bcm5974_table [] = {
67 /* MacbookAir1.1 */
68 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
69 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
70 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING_JIS),
71 /* MacbookProPenryn */
72 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI),
73 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_ISO),
74 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING2_JIS),
75 /* Terminating entry */
76 {}
77};
78MODULE_DEVICE_TABLE(usb, bcm5974_table);
79
80MODULE_AUTHOR("Henrik Rydberg");
81MODULE_DESCRIPTION("Apple USB BCM5974 multitouch driver");
82MODULE_LICENSE("GPL");
83
84#define dprintk(level, format, a...)\
85 { if (debug >= level) printk(KERN_DEBUG format, ##a); }
86
87static int debug = 1;
88module_param(debug, int, 0644);
89MODULE_PARM_DESC(debug, "Activate debugging output");
90
91/* button data structure */
92struct bt_data {
93 u8 unknown1; /* constant */
94 u8 button; /* left button */
95 u8 rel_x; /* relative x coordinate */
96 u8 rel_y; /* relative y coordinate */
97};
98
99/* trackpad header structure */
100struct tp_header {
101 u8 unknown1[16]; /* constants, timers, etc */
102 u8 fingers; /* number of fingers on trackpad */
103 u8 unknown2[9]; /* constants, timers, etc */
104};
105
106/* trackpad finger structure */
107struct tp_finger {
108 __le16 origin; /* left/right origin? */
109 __le16 abs_x; /* absolute x coodinate */
110 __le16 abs_y; /* absolute y coodinate */
111 __le16 rel_x; /* relative x coodinate */
112 __le16 rel_y; /* relative y coodinate */
113 __le16 size_major; /* finger size, major axis? */
114 __le16 size_minor; /* finger size, minor axis? */
115 __le16 orientation; /* 16384 when point, else 15 bit angle */
116 __le16 force_major; /* trackpad force, major axis? */
117 __le16 force_minor; /* trackpad force, minor axis? */
118 __le16 unused[3]; /* zeros */
119 __le16 multi; /* one finger: varies, more fingers: constant */
120};
121
122/* trackpad data structure, empirically at least ten fingers */
123struct tp_data {
124 struct tp_header header;
125 struct tp_finger finger[16];
126};
127
128/* device-specific parameters */
129struct bcm5974_param {
130 int dim; /* logical dimension */
131 int fuzz; /* logical noise value */
132 int devmin; /* device minimum reading */
133 int devmax; /* device maximum reading */
134};
135
136/* device-specific configuration */
137struct bcm5974_config {
138 int ansi, iso, jis; /* the product id of this device */
139 int bt_ep; /* the endpoint of the button interface */
140 int bt_datalen; /* data length of the button interface */
141 int tp_ep; /* the endpoint of the trackpad interface */
142 int tp_datalen; /* data length of the trackpad interface */
143 struct bcm5974_param p; /* finger pressure limits */
144 struct bcm5974_param w; /* finger width limits */
145 struct bcm5974_param x; /* horizontal limits */
146 struct bcm5974_param y; /* vertical limits */
147};
148
149/* logical device structure */
150struct bcm5974 {
151 char phys[64];
152 struct usb_device *udev; /* usb device */
153 struct usb_interface *intf; /* our interface */
154 struct input_dev *input; /* input dev */
155 struct bcm5974_config cfg; /* device configuration */
156 struct mutex pm_mutex; /* serialize access to open/suspend */
157 int opened; /* 1: opened, 0: closed */
158 struct urb *bt_urb; /* button usb request block */
159 struct bt_data *bt_data; /* button transferred data */
160 struct urb *tp_urb; /* trackpad usb request block */
161 struct tp_data *tp_data; /* trackpad transferred data */
162};
163
164/* logical dimensions */
165#define DIM_PRESSURE 256 /* maximum finger pressure */
166#define DIM_WIDTH 16 /* maximum finger width */
167#define DIM_X 1280 /* maximum trackpad x value */
168#define DIM_Y 800 /* maximum trackpad y value */
169
170/* logical signal quality */
171#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
172#define SN_WIDTH 100 /* width signal-to-noise ratio */
173#define SN_COORD 250 /* coordinate signal-to-noise ratio */
174
175/* device constants */
176static const struct bcm5974_config bcm5974_config_table[] = {
177 {
178 USB_DEVICE_ID_APPLE_WELLSPRING_ANSI,
179 USB_DEVICE_ID_APPLE_WELLSPRING_ISO,
180 USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
181 0x84, sizeof(struct bt_data),
182 0x81, sizeof(struct tp_data),
183 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
184 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
185 { DIM_X, DIM_X / SN_COORD, -4824, 5342 },
186 { DIM_Y, DIM_Y / SN_COORD, -172, 5820 }
187 },
188 {
189 USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI,
190 USB_DEVICE_ID_APPLE_WELLSPRING2_ISO,
191 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
192 0x84, sizeof(struct bt_data),
193 0x81, sizeof(struct tp_data),
194 { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 },
195 { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
196 { DIM_X, DIM_X / SN_COORD, -4824, 4824 },
197 { DIM_Y, DIM_Y / SN_COORD, -172, 4290 }
198 },
199 {}
200};
201
202/* return the device-specific configuration by device */
203static const struct bcm5974_config *bcm5974_get_config(struct usb_device *udev)
204{
205 u16 id = le16_to_cpu(udev->descriptor.idProduct);
206 const struct bcm5974_config *cfg;
207
208 for (cfg = bcm5974_config_table; cfg->ansi; ++cfg)
209 if (cfg->ansi == id || cfg->iso == id || cfg->jis == id)
210 return cfg;
211
212 return bcm5974_config_table;
213}
214
215/* convert 16-bit little endian to signed integer */
216static inline int raw2int(__le16 x)
217{
218 return (signed short)le16_to_cpu(x);
219}
220
221/* scale device data to logical dimensions (asserts devmin < devmax) */
222static inline int int2scale(const struct bcm5974_param *p, int x)
223{
224 return x * p->dim / (p->devmax - p->devmin);
225}
226
227/* all logical value ranges are [0,dim). */
228static inline int int2bound(const struct bcm5974_param *p, int x)
229{
230 int s = int2scale(p, x);
231
232 return clamp_val(s, 0, p->dim - 1);
233}
234
235/* setup which logical events to report */
236static void setup_events_to_report(struct input_dev *input_dev,
237 const struct bcm5974_config *cfg)
238{
239 __set_bit(EV_ABS, input_dev->evbit);
240
241 input_set_abs_params(input_dev, ABS_PRESSURE,
242 0, cfg->p.dim, cfg->p.fuzz, 0);
243 input_set_abs_params(input_dev, ABS_TOOL_WIDTH,
244 0, cfg->w.dim, cfg->w.fuzz, 0);
245 input_set_abs_params(input_dev, ABS_X,
246 0, cfg->x.dim, cfg->x.fuzz, 0);
247 input_set_abs_params(input_dev, ABS_Y,
248 0, cfg->y.dim, cfg->y.fuzz, 0);
249
250 __set_bit(EV_KEY, input_dev->evbit);
251 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
252 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
253 __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
254 __set_bit(BTN_LEFT, input_dev->keybit);
255}
256
257/* report button data as logical button state */
258static int report_bt_state(struct bcm5974 *dev, int size)
259{
260 if (size != sizeof(struct bt_data))
261 return -EIO;
262
263 input_report_key(dev->input, BTN_LEFT, dev->bt_data->button);
264 input_sync(dev->input);
265
266 return 0;
267}
268
269/* report trackpad data as logical trackpad state */
270static int report_tp_state(struct bcm5974 *dev, int size)
271{
272 const struct bcm5974_config *c = &dev->cfg;
273 const struct tp_finger *f = dev->tp_data->finger;
274 struct input_dev *input = dev->input;
275 const int fingers = (size - 26) / 28;
276 int p = 0, w, x, y, n = 0;
277
278 if (size < 26 || (size - 26) % 28 != 0)
279 return -EIO;
280
281 if (fingers) {
282 p = raw2int(f->force_major);
283 w = raw2int(f->size_major);
284 x = raw2int(f->abs_x);
285 y = raw2int(f->abs_y);
286 n = p > 0 ? fingers : 0;
287
288 dprintk(9,
289 "bcm5974: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n",
290 p, w, x, y, n);
291
292 input_report_abs(input, ABS_TOOL_WIDTH, int2bound(&c->w, w));
293 input_report_abs(input, ABS_X, int2bound(&c->x, x - c->x.devmin));
294 input_report_abs(input, ABS_Y, int2bound(&c->y, c->y.devmax - y));
295 }
296
297 input_report_abs(input, ABS_PRESSURE, int2bound(&c->p, p));
298
299 input_report_key(input, BTN_TOOL_FINGER, n == 1);
300 input_report_key(input, BTN_TOOL_DOUBLETAP, n == 2);
301 input_report_key(input, BTN_TOOL_TRIPLETAP, n > 2);
302
303 input_sync(input);
304
305 return 0;
306}
307
308/* Wellspring initialization constants */
309#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
310#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
311#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
312#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
313#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
314
315static int bcm5974_wellspring_mode(struct bcm5974 *dev)
316{
317 char *data = kmalloc(8, GFP_KERNEL);
318 int retval = 0, size;
319
320 if (!data) {
321 err("bcm5974: out of memory");
322 retval = -ENOMEM;
323 goto out;
324 }
325
326 /* read configuration */
327 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
328 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
329 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
330 BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
331 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
332
333 if (size != 8) {
334 err("bcm5974: could not read from device");
335 retval = -EIO;
336 goto out;
337 }
338
339 /* apply the mode switch */
340 data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE;
341
342 /* write configuration */
343 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
344 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
345 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
346 BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
347 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
348
349 if (size != 8) {
350 err("bcm5974: could not write to device");
351 retval = -EIO;
352 goto out;
353 }
354
355 dprintk(2, "bcm5974: switched to wellspring mode.\n");
356
357 out:
358 kfree(data);
359 return retval;
360}
361
362static void bcm5974_irq_button(struct urb *urb)
363{
364 struct bcm5974 *dev = urb->context;
365 int error;
366
367 switch (urb->status) {
368 case 0:
369 break;
370 case -EOVERFLOW:
371 case -ECONNRESET:
372 case -ENOENT:
373 case -ESHUTDOWN:
374 dbg("bcm5974: button urb shutting down: %d", urb->status);
375 return;
376 default:
377 dbg("bcm5974: button urb status: %d", urb->status);
378 goto exit;
379 }
380
381 if (report_bt_state(dev, dev->bt_urb->actual_length))
382 dprintk(1, "bcm5974: bad button package, length: %d\n",
383 dev->bt_urb->actual_length);
384
385exit:
386 error = usb_submit_urb(dev->bt_urb, GFP_ATOMIC);
387 if (error)
388 err("bcm5974: button urb failed: %d", error);
389}
390
391static void bcm5974_irq_trackpad(struct urb *urb)
392{
393 struct bcm5974 *dev = urb->context;
394 int error;
395
396 switch (urb->status) {
397 case 0:
398 break;
399 case -EOVERFLOW:
400 case -ECONNRESET:
401 case -ENOENT:
402 case -ESHUTDOWN:
403 dbg("bcm5974: trackpad urb shutting down: %d", urb->status);
404 return;
405 default:
406 dbg("bcm5974: trackpad urb status: %d", urb->status);
407 goto exit;
408 }
409
410 /* control response ignored */
411 if (dev->tp_urb->actual_length == 2)
412 goto exit;
413
414 if (report_tp_state(dev, dev->tp_urb->actual_length))
415 dprintk(1, "bcm5974: bad trackpad package, length: %d\n",
416 dev->tp_urb->actual_length);
417
418exit:
419 error = usb_submit_urb(dev->tp_urb, GFP_ATOMIC);
420 if (error)
421 err("bcm5974: trackpad urb failed: %d", error);
422}
423
424/*
425 * The Wellspring trackpad, like many recent Apple trackpads, share
426 * the usb device with the keyboard. Since keyboards are usually
427 * handled by the HID system, the device ends up being handled by two
428 * modules. Setting up the device therefore becomes slightly
429 * complicated. To enable multitouch features, a mode switch is
430 * required, which is usually applied via the control interface of the
431 * device. It can be argued where this switch should take place. In
432 * some drivers, like appletouch, the switch is made during
433 * probe. However, the hid module may also alter the state of the
434 * device, resulting in trackpad malfunction under certain
435 * circumstances. To get around this problem, there is at least one
436 * example that utilizes the USB_QUIRK_RESET_RESUME quirk in order to
437 * recieve a reset_resume request rather than the normal resume.
438 * Since the implementation of reset_resume is equal to mode switch
439 * plus start_traffic, it seems easier to always do the switch when
440 * starting traffic on the device.
441 */
442static int bcm5974_start_traffic(struct bcm5974 *dev)
443{
444 if (bcm5974_wellspring_mode(dev)) {
445 dprintk(1, "bcm5974: mode switch failed\n");
446 goto error;
447 }
448
449 if (usb_submit_urb(dev->bt_urb, GFP_KERNEL))
450 goto error;
451
452 if (usb_submit_urb(dev->tp_urb, GFP_KERNEL))
453 goto err_kill_bt;
454
455 return 0;
456
457err_kill_bt:
458 usb_kill_urb(dev->bt_urb);
459error:
460 return -EIO;
461}
462
463static void bcm5974_pause_traffic(struct bcm5974 *dev)
464{
465 usb_kill_urb(dev->tp_urb);
466 usb_kill_urb(dev->bt_urb);
467}
468
469/*
470 * The code below implements open/close and manual suspend/resume.
471 * All functions may be called in random order.
472 *
473 * Opening a suspended device fails with EACCES - permission denied.
474 *
475 * Failing a resume leaves the device resumed but closed.
476 */
477static int bcm5974_open(struct input_dev *input)
478{
479 struct bcm5974 *dev = input_get_drvdata(input);
480 int error;
481
482 error = usb_autopm_get_interface(dev->intf);
483 if (error)
484 return error;
485
486 mutex_lock(&dev->pm_mutex);
487
488 error = bcm5974_start_traffic(dev);
489 if (!error)
490 dev->opened = 1;
491
492 mutex_unlock(&dev->pm_mutex);
493
494 if (error)
495 usb_autopm_put_interface(dev->intf);
496
497 return error;
498}
499
500static void bcm5974_close(struct input_dev *input)
501{
502 struct bcm5974 *dev = input_get_drvdata(input);
503
504 mutex_lock(&dev->pm_mutex);
505
506 bcm5974_pause_traffic(dev);
507 dev->opened = 0;
508
509 mutex_unlock(&dev->pm_mutex);
510
511 usb_autopm_put_interface(dev->intf);
512}
513
514static int bcm5974_suspend(struct usb_interface *iface, pm_message_t message)
515{
516 struct bcm5974 *dev = usb_get_intfdata(iface);
517
518 mutex_lock(&dev->pm_mutex);
519
520 if (dev->opened)
521 bcm5974_pause_traffic(dev);
522
523 mutex_unlock(&dev->pm_mutex);
524
525 return 0;
526}
527
528static int bcm5974_resume(struct usb_interface *iface)
529{
530 struct bcm5974 *dev = usb_get_intfdata(iface);
531 int error = 0;
532
533 mutex_lock(&dev->pm_mutex);
534
535 if (dev->opened)
536 error = bcm5974_start_traffic(dev);
537
538 mutex_unlock(&dev->pm_mutex);
539
540 return error;
541}
542
543static int bcm5974_probe(struct usb_interface *iface,
544 const struct usb_device_id *id)
545{
546 struct usb_device *udev = interface_to_usbdev(iface);
547 const struct bcm5974_config *cfg;
548 struct bcm5974 *dev;
549 struct input_dev *input_dev;
550 int error = -ENOMEM;
551
552 /* find the product index */
553 cfg = bcm5974_get_config(udev);
554
555 /* allocate memory for our device state and initialize it */
556 dev = kzalloc(sizeof(struct bcm5974), GFP_KERNEL);
557 input_dev = input_allocate_device();
558 if (!dev || !input_dev) {
559 err("bcm5974: out of memory");
560 goto err_free_devs;
561 }
562
563 dev->udev = udev;
564 dev->intf = iface;
565 dev->input = input_dev;
566 dev->cfg = *cfg;
567 mutex_init(&dev->pm_mutex);
568
569 /* setup urbs */
570 dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL);
571 if (!dev->bt_urb)
572 goto err_free_devs;
573
574 dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL);
575 if (!dev->tp_urb)
576 goto err_free_bt_urb;
577
578 dev->bt_data = usb_buffer_alloc(dev->udev,
579 dev->cfg.bt_datalen, GFP_KERNEL,
580 &dev->bt_urb->transfer_dma);
581 if (!dev->bt_data)
582 goto err_free_urb;
583
584 dev->tp_data = usb_buffer_alloc(dev->udev,
585 dev->cfg.tp_datalen, GFP_KERNEL,
586 &dev->tp_urb->transfer_dma);
587 if (!dev->tp_data)
588 goto err_free_bt_buffer;
589
590 usb_fill_int_urb(dev->bt_urb, udev,
591 usb_rcvintpipe(udev, cfg->bt_ep),
592 dev->bt_data, dev->cfg.bt_datalen,
593 bcm5974_irq_button, dev, 1);
594
595 usb_fill_int_urb(dev->tp_urb, udev,
596 usb_rcvintpipe(udev, cfg->tp_ep),
597 dev->tp_data, dev->cfg.tp_datalen,
598 bcm5974_irq_trackpad, dev, 1);
599
600 /* create bcm5974 device */
601 usb_make_path(udev, dev->phys, sizeof(dev->phys));
602 strlcat(dev->phys, "/input0", sizeof(dev->phys));
603
604 input_dev->name = "bcm5974";
605 input_dev->phys = dev->phys;
606 usb_to_input_id(dev->udev, &input_dev->id);
607 input_dev->dev.parent = &iface->dev;
608
609 input_set_drvdata(input_dev, dev);
610
611 input_dev->open = bcm5974_open;
612 input_dev->close = bcm5974_close;
613
614 setup_events_to_report(input_dev, cfg);
615
616 error = input_register_device(dev->input);
617 if (error)
618 goto err_free_buffer;
619
620 /* save our data pointer in this interface device */
621 usb_set_intfdata(iface, dev);
622
623 return 0;
624
625err_free_buffer:
626 usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
627 dev->tp_data, dev->tp_urb->transfer_dma);
628err_free_bt_buffer:
629 usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
630 dev->bt_data, dev->bt_urb->transfer_dma);
631err_free_urb:
632 usb_free_urb(dev->tp_urb);
633err_free_bt_urb:
634 usb_free_urb(dev->bt_urb);
635err_free_devs:
636 usb_set_intfdata(iface, NULL);
637 input_free_device(input_dev);
638 kfree(dev);
639 return error;
640}
641
642static void bcm5974_disconnect(struct usb_interface *iface)
643{
644 struct bcm5974 *dev = usb_get_intfdata(iface);
645
646 usb_set_intfdata(iface, NULL);
647
648 input_unregister_device(dev->input);
649 usb_buffer_free(dev->udev, dev->cfg.tp_datalen,
650 dev->tp_data, dev->tp_urb->transfer_dma);
651 usb_buffer_free(dev->udev, dev->cfg.bt_datalen,
652 dev->bt_data, dev->bt_urb->transfer_dma);
653 usb_free_urb(dev->tp_urb);
654 usb_free_urb(dev->bt_urb);
655 kfree(dev);
656}
657
658static struct usb_driver bcm5974_driver = {
659 .name = "bcm5974",
660 .probe = bcm5974_probe,
661 .disconnect = bcm5974_disconnect,
662 .suspend = bcm5974_suspend,
663 .resume = bcm5974_resume,
664 .reset_resume = bcm5974_resume,
665 .id_table = bcm5974_table,
666 .supports_autosuspend = 1,
667};
668
669static int __init bcm5974_init(void)
670{
671 return usb_register(&bcm5974_driver);
672}
673
674static void __exit bcm5974_exit(void)
675{
676 usb_deregister(&bcm5974_driver);
677}
678
679module_init(bcm5974_init);
680module_exit(bcm5974_exit);
681
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 66bafe308b0c..692a79ec2a22 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -1,10 +1,11 @@
1#ifndef _I8042_SPARCIO_H 1#ifndef _I8042_SPARCIO_H
2#define _I8042_SPARCIO_H 2#define _I8042_SPARCIO_H
3 3
4#include <linux/of_device.h>
5
4#include <asm/io.h> 6#include <asm/io.h>
5#include <asm/oplib.h> 7#include <asm/oplib.h>
6#include <asm/prom.h> 8#include <asm/prom.h>
7#include <asm/of_device.h>
8 9
9static int i8042_kbd_irq = -1; 10static int i8042_kbd_irq = -1;
10static int i8042_aux_irq = -1; 11static int i8042_aux_irq = -1;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index fe732a574ec2..3282b741e246 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -394,6 +394,13 @@ static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
394 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), 394 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
395 }, 395 },
396 }, 396 },
397 {
398 .ident = "Acer TravelMate 4280",
399 .matches = {
400 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
401 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
402 },
403 },
397 { } 404 { }
398}; 405};
399 406
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 0ed044d5e685..765007899d9a 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -269,8 +269,8 @@ static int xps2_setup(struct device *dev, struct resource *regs_res,
269 * we have the PS2 in a good state */ 269 * we have the PS2 in a good state */
270 out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); 270 out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET);
271 271
272 dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%08X, irq=%d\n", 272 dev_info(dev, "Xilinx PS2 at 0x%08X mapped to 0x%p, irq=%d\n",
273 drvdata->phys_addr, (u32)drvdata->base_address, drvdata->irq); 273 drvdata->phys_addr, drvdata->base_address, drvdata->irq);
274 274
275 serio = &drvdata->serio; 275 serio = &drvdata->serio;
276 serio->id.type = SERIO_8042; 276 serio->id.type = SERIO_8042;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 6e60a97a234c..25287e80e236 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -249,29 +249,26 @@ config TOUCHSCREEN_WM97XX
249config TOUCHSCREEN_WM9705 249config TOUCHSCREEN_WM9705
250 bool "WM9705 Touchscreen interface support" 250 bool "WM9705 Touchscreen interface support"
251 depends on TOUCHSCREEN_WM97XX 251 depends on TOUCHSCREEN_WM97XX
252 default y
252 help 253 help
253 Say Y here if you have a Wolfson Microelectronics WM9705 254 Say Y here to enable support for the Wolfson Microelectronics
254 touchscreen controller connected to your system. 255 WM9705 touchscreen controller.
255
256 If unsure, say N.
257 256
258config TOUCHSCREEN_WM9712 257config TOUCHSCREEN_WM9712
259 bool "WM9712 Touchscreen interface support" 258 bool "WM9712 Touchscreen interface support"
260 depends on TOUCHSCREEN_WM97XX 259 depends on TOUCHSCREEN_WM97XX
260 default y
261 help 261 help
262 Say Y here if you have a Wolfson Microelectronics WM9712 262 Say Y here to enable support for the Wolfson Microelectronics
263 touchscreen controller connected to your system. 263 WM9712 touchscreen controller.
264
265 If unsure, say N.
266 264
267config TOUCHSCREEN_WM9713 265config TOUCHSCREEN_WM9713
268 bool "WM9713 Touchscreen interface support" 266 bool "WM9713 Touchscreen interface support"
269 depends on TOUCHSCREEN_WM97XX 267 depends on TOUCHSCREEN_WM97XX
268 default y
270 help 269 help
271 Say Y here if you have a Wolfson Microelectronics WM9713 touchscreen 270 Say Y here to enable support for the Wolfson Microelectronics
272 controller connected to your system. 271 WM9713 touchscreen controller.
273
274 If unsure, say N.
275 272
276config TOUCHSCREEN_WM97XX_MAINSTONE 273config TOUCHSCREEN_WM97XX_MAINSTONE
277 tristate "WM97xx Mainstone accelerated touch" 274 tristate "WM97xx Mainstone accelerated touch"
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d93500f24fbb..81d0c6053447 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -108,9 +108,8 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
108} 108}
109/*:*/ 109/*:*/
110 110
111/*M:014 get_pfn is slow; it takes the mmap sem and calls get_user_pages. We 111/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
112 * could probably try to grab batches of pages here as an optimization 112 * an optimization (ie. pre-faulting). :*/
113 * (ie. pre-faulting). :*/
114 113
115/*H:350 This routine takes a page number given by the Guest and converts it to 114/*H:350 This routine takes a page number given by the Guest and converts it to
116 * an actual, physical page number. It can fail for several reasons: the 115 * an actual, physical page number. It can fail for several reasons: the
@@ -123,19 +122,13 @@ static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
123static unsigned long get_pfn(unsigned long virtpfn, int write) 122static unsigned long get_pfn(unsigned long virtpfn, int write)
124{ 123{
125 struct page *page; 124 struct page *page;
126 /* This value indicates failure. */
127 unsigned long ret = -1UL;
128 125
129 /* get_user_pages() is a complex interface: it gets the "struct 126 /* gup me one page at this address please! */
130 * vm_area_struct" and "struct page" assocated with a range of pages. 127 if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
131 * It also needs the task's mmap_sem held, and is not very quick. 128 return page_to_pfn(page);
132 * It returns the number of pages it got. */ 129
133 down_read(&current->mm->mmap_sem); 130 /* This value indicates failure. */
134 if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT, 131 return -1UL;
135 1, write, 1, &page, NULL) == 1)
136 ret = page_to_pfn(page);
137 up_read(&current->mm->mmap_sem);
138 return ret;
139} 132}
140 133
141/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table 134/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
@@ -174,7 +167,7 @@ static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
174/*H:460 And to complete the chain, release_pte() looks like this: */ 167/*H:460 And to complete the chain, release_pte() looks like this: */
175static void release_pte(pte_t pte) 168static void release_pte(pte_t pte)
176{ 169{
177 /* Remember that get_user_pages() took a reference to the page, in 170 /* Remember that get_user_pages_fast() took a reference to the page, in
178 * get_pfn()? We have to put it back now. */ 171 * get_pfn()? We have to put it back now. */
179 if (pte_flags(pte) & _PAGE_PRESENT) 172 if (pte_flags(pte) & _PAGE_PRESENT)
180 put_page(pfn_to_page(pte_pfn(pte))); 173 put_page(pfn_to_page(pte_pfn(pte)));
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c7aae66c6f9b..8cfadc5bd2ba 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2393,6 +2393,8 @@ static void analyze_sbs(mddev_t * mddev)
2393 2393
2394} 2394}
2395 2395
2396static void md_safemode_timeout(unsigned long data);
2397
2396static ssize_t 2398static ssize_t
2397safe_delay_show(mddev_t *mddev, char *page) 2399safe_delay_show(mddev_t *mddev, char *page)
2398{ 2400{
@@ -2432,9 +2434,12 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2432 if (msec == 0) 2434 if (msec == 0)
2433 mddev->safemode_delay = 0; 2435 mddev->safemode_delay = 0;
2434 else { 2436 else {
2437 unsigned long old_delay = mddev->safemode_delay;
2435 mddev->safemode_delay = (msec*HZ)/1000; 2438 mddev->safemode_delay = (msec*HZ)/1000;
2436 if (mddev->safemode_delay == 0) 2439 if (mddev->safemode_delay == 0)
2437 mddev->safemode_delay = 1; 2440 mddev->safemode_delay = 1;
2441 if (mddev->safemode_delay < old_delay)
2442 md_safemode_timeout((unsigned long)mddev);
2438 } 2443 }
2439 return len; 2444 return len;
2440} 2445}
@@ -4634,6 +4639,11 @@ static int update_size(mddev_t *mddev, sector_t num_sectors)
4634 */ 4639 */
4635 if (mddev->sync_thread) 4640 if (mddev->sync_thread)
4636 return -EBUSY; 4641 return -EBUSY;
4642 if (mddev->bitmap)
4643 /* Sorry, cannot grow a bitmap yet, just remove it,
4644 * grow, and re-add.
4645 */
4646 return -EBUSY;
4637 rdev_for_each(rdev, tmp, mddev) { 4647 rdev_for_each(rdev, tmp, mddev) {
4638 sector_t avail; 4648 sector_t avail;
4639 avail = rdev->size * 2; 4649 avail = rdev->size * 2;
@@ -5993,7 +6003,7 @@ static int remove_and_add_spares(mddev_t *mddev)
5993 } 6003 }
5994 } 6004 }
5995 6005
5996 if (mddev->degraded) { 6006 if (mddev->degraded && ! mddev->ro) {
5997 rdev_for_each(rdev, rtmp, mddev) { 6007 rdev_for_each(rdev, rtmp, mddev) {
5998 if (rdev->raid_disk >= 0 && 6008 if (rdev->raid_disk >= 0 &&
5999 !test_bit(In_sync, &rdev->flags) && 6009 !test_bit(In_sync, &rdev->flags) &&
@@ -6067,6 +6077,8 @@ void md_check_recovery(mddev_t *mddev)
6067 flush_signals(current); 6077 flush_signals(current);
6068 } 6078 }
6069 6079
6080 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
6081 return;
6070 if ( ! ( 6082 if ( ! (
6071 (mddev->flags && !mddev->external) || 6083 (mddev->flags && !mddev->external) ||
6072 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 6084 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
@@ -6080,6 +6092,15 @@ void md_check_recovery(mddev_t *mddev)
6080 if (mddev_trylock(mddev)) { 6092 if (mddev_trylock(mddev)) {
6081 int spares = 0; 6093 int spares = 0;
6082 6094
6095 if (mddev->ro) {
6096 /* Only thing we do on a ro array is remove
6097 * failed devices.
6098 */
6099 remove_and_add_spares(mddev);
6100 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6101 goto unlock;
6102 }
6103
6083 if (!mddev->external) { 6104 if (!mddev->external) {
6084 int did_change = 0; 6105 int did_change = 0;
6085 spin_lock_irq(&mddev->write_lock); 6106 spin_lock_irq(&mddev->write_lock);
@@ -6117,7 +6138,8 @@ void md_check_recovery(mddev_t *mddev)
6117 /* resync has finished, collect result */ 6138 /* resync has finished, collect result */
6118 md_unregister_thread(mddev->sync_thread); 6139 md_unregister_thread(mddev->sync_thread);
6119 mddev->sync_thread = NULL; 6140 mddev->sync_thread = NULL;
6120 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6141 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
6142 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
6121 /* success...*/ 6143 /* success...*/
6122 /* activate any spares */ 6144 /* activate any spares */
6123 if (mddev->pers->spare_active(mddev)) 6145 if (mddev->pers->spare_active(mddev))
@@ -6169,6 +6191,7 @@ void md_check_recovery(mddev_t *mddev)
6169 } else if ((spares = remove_and_add_spares(mddev))) { 6191 } else if ((spares = remove_and_add_spares(mddev))) {
6170 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6192 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
6171 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6193 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
6194 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
6172 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6195 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
6173 } else if (mddev->recovery_cp < MaxSector) { 6196 } else if (mddev->recovery_cp < MaxSector) {
6174 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6197 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
@@ -6232,7 +6255,11 @@ static int md_notify_reboot(struct notifier_block *this,
6232 6255
6233 for_each_mddev(mddev, tmp) 6256 for_each_mddev(mddev, tmp)
6234 if (mddev_trylock(mddev)) { 6257 if (mddev_trylock(mddev)) {
6235 do_md_stop (mddev, 1, 0); 6258 /* Force a switch to readonly even array
6259 * appears to still be in use. Hence
6260 * the '100'.
6261 */
6262 do_md_stop (mddev, 1, 100);
6236 mddev_unlock(mddev); 6263 mddev_unlock(mddev);
6237 } 6264 }
6238 /* 6265 /*
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d41bebb6da0f..e34cd0e62473 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -76,11 +76,13 @@ static void r10bio_pool_free(void *r10_bio, void *data)
76 kfree(r10_bio); 76 kfree(r10_bio);
77} 77}
78 78
79/* Maximum size of each resync request */
79#define RESYNC_BLOCK_SIZE (64*1024) 80#define RESYNC_BLOCK_SIZE (64*1024)
80//#define RESYNC_BLOCK_SIZE PAGE_SIZE
81#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
82#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 81#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
83#define RESYNC_WINDOW (2048*1024) 82/* amount of memory to reserve for resync requests */
83#define RESYNC_WINDOW (1024*1024)
84/* maximum number of concurrent requests, memory permitting */
85#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
84 86
85/* 87/*
86 * When performing a resync, we need to read and compare, so 88 * When performing a resync, we need to read and compare, so
@@ -690,7 +692,6 @@ static int flush_pending_writes(conf_t *conf)
690 * there is no normal IO happeing. It must arrange to call 692 * there is no normal IO happeing. It must arrange to call
691 * lower_barrier when the particular background IO completes. 693 * lower_barrier when the particular background IO completes.
692 */ 694 */
693#define RESYNC_DEPTH 32
694 695
695static void raise_barrier(conf_t *conf, int force) 696static void raise_barrier(conf_t *conf, int force)
696{ 697{
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 40e939675657..224de022e7c5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2568,10 +2568,10 @@ static bool handle_stripe5(struct stripe_head *sh)
2568 if (dev->written) 2568 if (dev->written)
2569 s.written++; 2569 s.written++;
2570 rdev = rcu_dereference(conf->disks[i].rdev); 2570 rdev = rcu_dereference(conf->disks[i].rdev);
2571 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2571 if (blocked_rdev == NULL &&
2572 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2572 blocked_rdev = rdev; 2573 blocked_rdev = rdev;
2573 atomic_inc(&rdev->nr_pending); 2574 atomic_inc(&rdev->nr_pending);
2574 break;
2575 } 2575 }
2576 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2576 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2577 /* The ReadError flag will just be confusing now */ 2577 /* The ReadError flag will just be confusing now */
@@ -2588,8 +2588,14 @@ static bool handle_stripe5(struct stripe_head *sh)
2588 rcu_read_unlock(); 2588 rcu_read_unlock();
2589 2589
2590 if (unlikely(blocked_rdev)) { 2590 if (unlikely(blocked_rdev)) {
2591 set_bit(STRIPE_HANDLE, &sh->state); 2591 if (s.syncing || s.expanding || s.expanded ||
2592 goto unlock; 2592 s.to_write || s.written) {
2593 set_bit(STRIPE_HANDLE, &sh->state);
2594 goto unlock;
2595 }
2596 /* There is nothing for the blocked_rdev to block */
2597 rdev_dec_pending(blocked_rdev, conf->mddev);
2598 blocked_rdev = NULL;
2593 } 2599 }
2594 2600
2595 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 2601 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
@@ -2832,10 +2838,10 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2832 if (dev->written) 2838 if (dev->written)
2833 s.written++; 2839 s.written++;
2834 rdev = rcu_dereference(conf->disks[i].rdev); 2840 rdev = rcu_dereference(conf->disks[i].rdev);
2835 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2841 if (blocked_rdev == NULL &&
2842 rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
2836 blocked_rdev = rdev; 2843 blocked_rdev = rdev;
2837 atomic_inc(&rdev->nr_pending); 2844 atomic_inc(&rdev->nr_pending);
2838 break;
2839 } 2845 }
2840 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2846 if (!rdev || !test_bit(In_sync, &rdev->flags)) {
2841 /* The ReadError flag will just be confusing now */ 2847 /* The ReadError flag will just be confusing now */
@@ -2853,9 +2859,16 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
2853 rcu_read_unlock(); 2859 rcu_read_unlock();
2854 2860
2855 if (unlikely(blocked_rdev)) { 2861 if (unlikely(blocked_rdev)) {
2856 set_bit(STRIPE_HANDLE, &sh->state); 2862 if (s.syncing || s.expanding || s.expanded ||
2857 goto unlock; 2863 s.to_write || s.written) {
2864 set_bit(STRIPE_HANDLE, &sh->state);
2865 goto unlock;
2866 }
2867 /* There is nothing for the blocked_rdev to block */
2868 rdev_dec_pending(blocked_rdev, conf->mddev);
2869 blocked_rdev = NULL;
2858 } 2870 }
2871
2859 pr_debug("locked=%d uptodate=%d to_read=%d" 2872 pr_debug("locked=%d uptodate=%d to_read=%d"
2860 " to_write=%d failed=%d failed_num=%d,%d\n", 2873 " to_write=%d failed=%d failed_num=%d,%d\n",
2861 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 2874 s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -4446,6 +4459,9 @@ static int raid5_check_reshape(mddev_t *mddev)
4446 return -EINVAL; /* Cannot shrink array or change level yet */ 4459 return -EINVAL; /* Cannot shrink array or change level yet */
4447 if (mddev->delta_disks == 0) 4460 if (mddev->delta_disks == 0)
4448 return 0; /* nothing to do */ 4461 return 0; /* nothing to do */
4462 if (mddev->bitmap)
4463 /* Cannot grow a bitmap yet */
4464 return -EBUSY;
4449 4465
4450 /* Can only proceed if there are plenty of stripe_heads. 4466 /* Can only proceed if there are plenty of stripe_heads.
4451 * We need a minimum of one full stripe,, and for sensible progress 4467 * We need a minimum of one full stripe,, and for sensible progress
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 883e7ea31de2..10c44d3fe01a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -50,10 +50,31 @@ config HTC_PASIC3
50 HTC Magician devices, respectively. Actual functionality is 50 HTC Magician devices, respectively. Actual functionality is
51 handled by the leds-pasic3 and ds1wm drivers. 51 handled by the leds-pasic3 and ds1wm drivers.
52 52
53config MFD_TMIO
54 bool
55 default n
56
57config MFD_T7L66XB
58 bool "Support Toshiba T7L66XB"
59 depends on ARM
60 select MFD_CORE
61 select MFD_TMIO
62 help
63 Support for Toshiba Mobile IO Controller T7L66XB
64
65config MFD_TC6387XB
66 bool "Support Toshiba TC6387XB"
67 depends on ARM
68 select MFD_CORE
69 select MFD_TMIO
70 help
71 Support for Toshiba Mobile IO Controller TC6387XB
72
53config MFD_TC6393XB 73config MFD_TC6393XB
54 bool "Support Toshiba TC6393XB" 74 bool "Support Toshiba TC6393XB"
55 depends on GPIOLIB && ARM 75 depends on GPIOLIB && ARM
56 select MFD_CORE 76 select MFD_CORE
77 select MFD_TMIO
57 help 78 help
58 Support for Toshiba Mobile IO Controller TC6393XB 79 Support for Toshiba Mobile IO Controller TC6393XB
59 80
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 33daa2f45dd8..03ad239ecef0 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_MFD_ASIC3) += asic3.o
8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o 8obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o 9obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
10 10
11obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o
12obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o
11obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o 13obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
12 14
13obj-$(CONFIG_MFD_CORE) += mfd-core.o 15obj-$(CONFIG_MFD_CORE) += mfd-core.o
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
new file mode 100644
index 000000000000..49a0fffc02af
--- /dev/null
+++ b/drivers/mfd/t7l66xb.c
@@ -0,0 +1,419 @@
1/*
2 *
3 * Toshiba T7L66XB core mfd support
4 *
5 * Copyright (c) 2005, 2007, 2008 Ian Molton
6 * Copyright (c) 2008 Dmitry Baryshkov
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * T7L66 features:
13 *
14 * Supported in this driver:
15 * SD/MMC
16 * SM/NAND flash controller
17 *
18 * As yet not supported
19 * GPIO interface (on NAND pins)
20 * Serial interface
21 * TFT 'interface converter'
22 * PCMCIA interface logic
23 */
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/platform_device.h>
30#include <linux/mfd/core.h>
31#include <linux/mfd/tmio.h>
32#include <linux/mfd/t7l66xb.h>
33
34enum {
35 T7L66XB_CELL_NAND,
36 T7L66XB_CELL_MMC,
37};
38
39#define SCR_REVID 0x08 /* b Revision ID */
40#define SCR_IMR 0x42 /* b Interrupt Mask */
41#define SCR_DEV_CTL 0xe0 /* b Device control */
42#define SCR_ISR 0xe1 /* b Interrupt Status */
43#define SCR_GPO_OC 0xf0 /* b GPO output control */
44#define SCR_GPO_OS 0xf1 /* b GPO output enable */
45#define SCR_GPI_S 0xf2 /* w GPI status */
46#define SCR_APDC 0xf8 /* b Active pullup down ctrl */
47
48#define SCR_DEV_CTL_USB BIT(0) /* USB enable */
49#define SCR_DEV_CTL_MMC BIT(1) /* MMC enable */
50
51/*--------------------------------------------------------------------------*/
52
53struct t7l66xb {
54 void __iomem *scr;
55 /* Lock to protect registers requiring read/modify/write ops. */
56 spinlock_t lock;
57
58 struct resource rscr;
59 int irq;
60 int irq_base;
61};
62
63/*--------------------------------------------------------------------------*/
64
65static int t7l66xb_mmc_enable(struct platform_device *mmc)
66{
67 struct platform_device *dev = to_platform_device(mmc->dev.parent);
68 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
69 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
70 unsigned long flags;
71 u8 dev_ctl;
72
73 if (pdata->enable_clk32k)
74 pdata->enable_clk32k(dev);
75
76 spin_lock_irqsave(&t7l66xb->lock, flags);
77
78 dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
79 dev_ctl |= SCR_DEV_CTL_MMC;
80 tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
81
82 spin_unlock_irqrestore(&t7l66xb->lock, flags);
83
84 return 0;
85}
86
87static int t7l66xb_mmc_disable(struct platform_device *mmc)
88{
89 struct platform_device *dev = to_platform_device(mmc->dev.parent);
90 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
91 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
92 unsigned long flags;
93 u8 dev_ctl;
94
95 spin_lock_irqsave(&t7l66xb->lock, flags);
96
97 dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
98 dev_ctl &= ~SCR_DEV_CTL_MMC;
99 tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
100
101 spin_unlock_irqrestore(&t7l66xb->lock, flags);
102
103 if (pdata->disable_clk32k)
104 pdata->disable_clk32k(dev);
105
106 return 0;
107}
108
109/*--------------------------------------------------------------------------*/
110
111const static struct resource t7l66xb_mmc_resources[] = {
112 {
113 .start = 0x800,
114 .end = 0x9ff,
115 .flags = IORESOURCE_MEM,
116 },
117 {
118 .start = 0x200,
119 .end = 0x2ff,
120 .flags = IORESOURCE_MEM,
121 },
122 {
123 .start = IRQ_T7L66XB_MMC,
124 .end = IRQ_T7L66XB_MMC,
125 .flags = IORESOURCE_IRQ,
126 },
127};
128
129const static struct resource t7l66xb_nand_resources[] = {
130 {
131 .start = 0xc00,
132 .end = 0xc07,
133 .flags = IORESOURCE_MEM,
134 },
135 {
136 .start = 0x0100,
137 .end = 0x01ff,
138 .flags = IORESOURCE_MEM,
139 },
140 {
141 .start = IRQ_T7L66XB_NAND,
142 .end = IRQ_T7L66XB_NAND,
143 .flags = IORESOURCE_IRQ,
144 },
145};
146
147static struct mfd_cell t7l66xb_cells[] = {
148 [T7L66XB_CELL_MMC] = {
149 .name = "tmio-mmc",
150 .enable = t7l66xb_mmc_enable,
151 .disable = t7l66xb_mmc_disable,
152 .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
153 .resources = t7l66xb_mmc_resources,
154 },
155 [T7L66XB_CELL_NAND] = {
156 .name = "tmio-nand",
157 .num_resources = ARRAY_SIZE(t7l66xb_nand_resources),
158 .resources = t7l66xb_nand_resources,
159 },
160};
161
162/*--------------------------------------------------------------------------*/
163
164/* Handle the T7L66XB interrupt mux */
165static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
166{
167 struct t7l66xb *t7l66xb = get_irq_data(irq);
168 unsigned int isr;
169 unsigned int i, irq_base;
170
171 irq_base = t7l66xb->irq_base;
172
173 while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) &
174 ~tmio_ioread8(t7l66xb->scr + SCR_IMR)))
175 for (i = 0; i < T7L66XB_NR_IRQS; i++)
176 if (isr & (1 << i))
177 generic_handle_irq(irq_base + i);
178}
179
180static void t7l66xb_irq_mask(unsigned int irq)
181{
182 struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
183 unsigned long flags;
184 u8 imr;
185
186 spin_lock_irqsave(&t7l66xb->lock, flags);
187 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
188 imr |= 1 << (irq - t7l66xb->irq_base);
189 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
190 spin_unlock_irqrestore(&t7l66xb->lock, flags);
191}
192
193static void t7l66xb_irq_unmask(unsigned int irq)
194{
195 struct t7l66xb *t7l66xb = get_irq_chip_data(irq);
196 unsigned long flags;
197 u8 imr;
198
199 spin_lock_irqsave(&t7l66xb->lock, flags);
200 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
201 imr &= ~(1 << (irq - t7l66xb->irq_base));
202 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
203 spin_unlock_irqrestore(&t7l66xb->lock, flags);
204}
205
206static struct irq_chip t7l66xb_chip = {
207 .name = "t7l66xb",
208 .ack = t7l66xb_irq_mask,
209 .mask = t7l66xb_irq_mask,
210 .unmask = t7l66xb_irq_unmask,
211};
212
213/*--------------------------------------------------------------------------*/
214
215/* Install the IRQ handler */
216static void t7l66xb_attach_irq(struct platform_device *dev)
217{
218 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
219 unsigned int irq, irq_base;
220
221 irq_base = t7l66xb->irq_base;
222
223 for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
224 set_irq_chip(irq, &t7l66xb_chip);
225 set_irq_chip_data(irq, t7l66xb);
226 set_irq_handler(irq, handle_level_irq);
227#ifdef CONFIG_ARM
228 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
229#endif
230 }
231
232 set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
233 set_irq_data(t7l66xb->irq, t7l66xb);
234 set_irq_chained_handler(t7l66xb->irq, t7l66xb_irq);
235}
236
237static void t7l66xb_detach_irq(struct platform_device *dev)
238{
239 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
240 unsigned int irq, irq_base;
241
242 irq_base = t7l66xb->irq_base;
243
244 set_irq_chained_handler(t7l66xb->irq, NULL);
245 set_irq_data(t7l66xb->irq, NULL);
246
247 for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
248#ifdef CONFIG_ARM
249 set_irq_flags(irq, 0);
250#endif
251 set_irq_chip(irq, NULL);
252 set_irq_chip_data(irq, NULL);
253 }
254}
255
256/*--------------------------------------------------------------------------*/
257
258#ifdef CONFIG_PM
259static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
260{
261 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
262
263 if (pdata && pdata->suspend)
264 pdata->suspend(dev);
265
266 return 0;
267}
268
269static int t7l66xb_resume(struct platform_device *dev)
270{
271 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
272
273 if (pdata && pdata->resume)
274 pdata->resume(dev);
275
276 return 0;
277}
278#else
279#define t7l66xb_suspend NULL
280#define t7l66xb_resume NULL
281#endif
282
283/*--------------------------------------------------------------------------*/
284
285static int t7l66xb_probe(struct platform_device *dev)
286{
287 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
288 struct t7l66xb *t7l66xb;
289 struct resource *iomem, *rscr;
290 int ret;
291
292 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
293 if (!iomem)
294 return -EINVAL;
295
296 t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL);
297 if (!t7l66xb)
298 return -ENOMEM;
299
300 spin_lock_init(&t7l66xb->lock);
301
302 platform_set_drvdata(dev, t7l66xb);
303
304 ret = platform_get_irq(dev, 0);
305 if (ret >= 0)
306 t7l66xb->irq = ret;
307 else
308 goto err_noirq;
309
310 t7l66xb->irq_base = pdata->irq_base;
311
312 rscr = &t7l66xb->rscr;
313 rscr->name = "t7l66xb-core";
314 rscr->start = iomem->start;
315 rscr->end = iomem->start + 0xff;
316 rscr->flags = IORESOURCE_MEM;
317
318 ret = request_resource(iomem, rscr);
319 if (ret)
320 goto err_request_scr;
321
322 t7l66xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
323 if (!t7l66xb->scr) {
324 ret = -ENOMEM;
325 goto err_ioremap;
326 }
327
328 if (pdata && pdata->enable)
329 pdata->enable(dev);
330
331 /* Mask all interrupts */
332 tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR);
333
334 printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n",
335 dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID),
336 (unsigned long)iomem->start, t7l66xb->irq);
337
338 t7l66xb_attach_irq(dev);
339
340 t7l66xb_cells[T7L66XB_CELL_NAND].driver_data = pdata->nand_data;
341 t7l66xb_cells[T7L66XB_CELL_NAND].platform_data =
342 &t7l66xb_cells[T7L66XB_CELL_NAND];
343 t7l66xb_cells[T7L66XB_CELL_NAND].data_size =
344 sizeof(t7l66xb_cells[T7L66XB_CELL_NAND]);
345
346 t7l66xb_cells[T7L66XB_CELL_MMC].platform_data =
347 &t7l66xb_cells[T7L66XB_CELL_MMC];
348 t7l66xb_cells[T7L66XB_CELL_MMC].data_size =
349 sizeof(t7l66xb_cells[T7L66XB_CELL_MMC]);
350
351 ret = mfd_add_devices(&dev->dev, dev->id,
352 t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
353 iomem, t7l66xb->irq_base);
354
355 if (!ret)
356 return 0;
357
358 t7l66xb_detach_irq(dev);
359 iounmap(t7l66xb->scr);
360err_ioremap:
361 release_resource(&t7l66xb->rscr);
362err_noirq:
363err_request_scr:
364 kfree(t7l66xb);
365 return ret;
366}
367
368static int t7l66xb_remove(struct platform_device *dev)
369{
370 struct t7l66xb_platform_data *pdata = dev->dev.platform_data;
371 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
372 int ret;
373
374 ret = pdata->disable(dev);
375
376 t7l66xb_detach_irq(dev);
377 iounmap(t7l66xb->scr);
378 release_resource(&t7l66xb->rscr);
379 mfd_remove_devices(&dev->dev);
380 platform_set_drvdata(dev, NULL);
381 kfree(t7l66xb);
382
383 return ret;
384
385}
386
387static struct platform_driver t7l66xb_platform_driver = {
388 .driver = {
389 .name = "t7l66xb",
390 .owner = THIS_MODULE,
391 },
392 .suspend = t7l66xb_suspend,
393 .resume = t7l66xb_resume,
394 .probe = t7l66xb_probe,
395 .remove = t7l66xb_remove,
396};
397
398/*--------------------------------------------------------------------------*/
399
400static int __init t7l66xb_init(void)
401{
402 int retval = 0;
403
404 retval = platform_driver_register(&t7l66xb_platform_driver);
405 return retval;
406}
407
408static void __exit t7l66xb_exit(void)
409{
410 platform_driver_unregister(&t7l66xb_platform_driver);
411}
412
413module_init(t7l66xb_init);
414module_exit(t7l66xb_exit);
415
416MODULE_DESCRIPTION("Toshiba T7L66XB core driver");
417MODULE_LICENSE("GPL v2");
418MODULE_AUTHOR("Ian Molton");
419MODULE_ALIAS("platform:t7l66xb");
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
new file mode 100644
index 000000000000..a22b21ac6cf8
--- /dev/null
+++ b/drivers/mfd/tc6387xb.c
@@ -0,0 +1,181 @@
1/*
2 * Toshiba TC6387XB support
3 * Copyright (c) 2005 Ian Molton
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This file contains TC6387XB base support.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/err.h>
16#include <linux/mfd/core.h>
17#include <linux/mfd/tmio.h>
18#include <linux/mfd/tc6387xb.h>
19
20enum {
21 TC6387XB_CELL_MMC,
22};
23
24#ifdef CONFIG_PM
25static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
26{
27 struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev);
28
29 if (pdata && pdata->suspend)
30 pdata->suspend(dev);
31
32 return 0;
33}
34
35static int tc6387xb_resume(struct platform_device *dev)
36{
37 struct tc6387xb_platform_data *pdata = platform_get_drvdata(dev);
38
39 if (pdata && pdata->resume)
40 pdata->resume(dev);
41
42 return 0;
43}
44#else
45#define tc6387xb_suspend NULL
46#define tc6387xb_resume NULL
47#endif
48
49/*--------------------------------------------------------------------------*/
50
51static int tc6387xb_mmc_enable(struct platform_device *mmc)
52{
53 struct platform_device *dev = to_platform_device(mmc->dev.parent);
54 struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data;
55
56 if (tc6387xb->enable_clk32k)
57 tc6387xb->enable_clk32k(dev);
58
59 return 0;
60}
61
62static int tc6387xb_mmc_disable(struct platform_device *mmc)
63{
64 struct platform_device *dev = to_platform_device(mmc->dev.parent);
65 struct tc6387xb_platform_data *tc6387xb = dev->dev.platform_data;
66
67 if (tc6387xb->disable_clk32k)
68 tc6387xb->disable_clk32k(dev);
69
70 return 0;
71}
72
73/*--------------------------------------------------------------------------*/
74
75static struct resource tc6387xb_mmc_resources[] = {
76 {
77 .start = 0x800,
78 .end = 0x9ff,
79 .flags = IORESOURCE_MEM,
80 },
81 {
82 .start = 0x200,
83 .end = 0x2ff,
84 .flags = IORESOURCE_MEM,
85 },
86 {
87 .start = 0,
88 .end = 0,
89 .flags = IORESOURCE_IRQ,
90 },
91};
92
93static struct mfd_cell tc6387xb_cells[] = {
94 [TC6387XB_CELL_MMC] = {
95 .name = "tmio-mmc",
96 .enable = tc6387xb_mmc_enable,
97 .disable = tc6387xb_mmc_disable,
98 .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
99 .resources = tc6387xb_mmc_resources,
100 },
101};
102
103static int tc6387xb_probe(struct platform_device *dev)
104{
105 struct tc6387xb_platform_data *data = platform_get_drvdata(dev);
106 struct resource *iomem;
107 int irq, ret;
108
109 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
110 if (!iomem) {
111 ret = -EINVAL;
112 goto err_resource;
113 }
114
115 ret = platform_get_irq(dev, 0);
116 if (ret >= 0)
117 irq = ret;
118 else
119 goto err_resource;
120
121 if (data && data->enable)
122 data->enable(dev);
123
124 printk(KERN_INFO "Toshiba tc6387xb initialised\n");
125
126 tc6387xb_cells[TC6387XB_CELL_MMC].platform_data =
127 &tc6387xb_cells[TC6387XB_CELL_MMC];
128 tc6387xb_cells[TC6387XB_CELL_MMC].data_size =
129 sizeof(tc6387xb_cells[TC6387XB_CELL_MMC]);
130
131 ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
132 ARRAY_SIZE(tc6387xb_cells), iomem, irq);
133
134 if (!ret)
135 return 0;
136
137err_resource:
138 return ret;
139}
140
141static int tc6387xb_remove(struct platform_device *dev)
142{
143 struct tc6387xb_platform_data *data = platform_get_drvdata(dev);
144
145 if (data && data->disable)
146 data->disable(dev);
147
148 /* FIXME - free the resources! */
149
150 return 0;
151}
152
153
154static struct platform_driver tc6387xb_platform_driver = {
155 .driver = {
156 .name = "tc6387xb",
157 },
158 .probe = tc6387xb_probe,
159 .remove = tc6387xb_remove,
160 .suspend = tc6387xb_suspend,
161 .resume = tc6387xb_resume,
162};
163
164
165static int __init tc6387xb_init(void)
166{
167 return platform_driver_register(&tc6387xb_platform_driver);
168}
169
170static void __exit tc6387xb_exit(void)
171{
172 platform_driver_unregister(&tc6387xb_platform_driver);
173}
174
175module_init(tc6387xb_init);
176module_exit(tc6387xb_exit);
177
178MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
179MODULE_LICENSE("GPL v2");
180MODULE_AUTHOR("Ian Molton");
181MODULE_ALIAS("platform:tc6387xb");
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index f4fd797c1590..e4c1c788b5f8 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -19,8 +19,8 @@
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/fb.h>
23#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/err.h>
24#include <linux/mfd/core.h> 24#include <linux/mfd/core.h>
25#include <linux/mfd/tmio.h> 25#include <linux/mfd/tmio.h>
26#include <linux/mfd/tc6393xb.h> 26#include <linux/mfd/tc6393xb.h>
@@ -112,6 +112,7 @@ struct tc6393xb {
112 112
113enum { 113enum {
114 TC6393XB_CELL_NAND, 114 TC6393XB_CELL_NAND,
115 TC6393XB_CELL_MMC,
115}; 116};
116 117
117/*--------------------------------------------------------------------------*/ 118/*--------------------------------------------------------------------------*/
@@ -126,7 +127,7 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
126 127
127 /* SMD buffer on */ 128 /* SMD buffer on */
128 dev_dbg(&dev->dev, "SMD buffer on\n"); 129 dev_dbg(&dev->dev, "SMD buffer on\n");
129 iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1)); 130 tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
130 131
131 spin_unlock_irqrestore(&tc6393xb->lock, flags); 132 spin_unlock_irqrestore(&tc6393xb->lock, flags);
132 133
@@ -135,25 +136,40 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
135 136
136static struct resource __devinitdata tc6393xb_nand_resources[] = { 137static struct resource __devinitdata tc6393xb_nand_resources[] = {
137 { 138 {
138 .name = TMIO_NAND_CONFIG, 139 .start = 0x1000,
139 .start = 0x0100, 140 .end = 0x1007,
140 .end = 0x01ff,
141 .flags = IORESOURCE_MEM, 141 .flags = IORESOURCE_MEM,
142 }, 142 },
143 { 143 {
144 .name = TMIO_NAND_CONTROL, 144 .start = 0x0100,
145 .start = 0x1000, 145 .end = 0x01ff,
146 .end = 0x1007,
147 .flags = IORESOURCE_MEM, 146 .flags = IORESOURCE_MEM,
148 }, 147 },
149 { 148 {
150 .name = TMIO_NAND_IRQ,
151 .start = IRQ_TC6393_NAND, 149 .start = IRQ_TC6393_NAND,
152 .end = IRQ_TC6393_NAND, 150 .end = IRQ_TC6393_NAND,
153 .flags = IORESOURCE_IRQ, 151 .flags = IORESOURCE_IRQ,
154 }, 152 },
155}; 153};
156 154
155static struct resource __devinitdata tc6393xb_mmc_resources[] = {
156 {
157 .start = 0x800,
158 .end = 0x9ff,
159 .flags = IORESOURCE_MEM,
160 },
161 {
162 .start = 0x200,
163 .end = 0x2ff,
164 .flags = IORESOURCE_MEM,
165 },
166 {
167 .start = IRQ_TC6393_MMC,
168 .end = IRQ_TC6393_MMC,
169 .flags = IORESOURCE_IRQ,
170 },
171};
172
157static struct mfd_cell __devinitdata tc6393xb_cells[] = { 173static struct mfd_cell __devinitdata tc6393xb_cells[] = {
158 [TC6393XB_CELL_NAND] = { 174 [TC6393XB_CELL_NAND] = {
159 .name = "tmio-nand", 175 .name = "tmio-nand",
@@ -161,6 +177,11 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
161 .num_resources = ARRAY_SIZE(tc6393xb_nand_resources), 177 .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
162 .resources = tc6393xb_nand_resources, 178 .resources = tc6393xb_nand_resources,
163 }, 179 },
180 [TC6393XB_CELL_MMC] = {
181 .name = "tmio-mmc",
182 .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
183 .resources = tc6393xb_mmc_resources,
184 },
164}; 185};
165 186
166/*--------------------------------------------------------------------------*/ 187/*--------------------------------------------------------------------------*/
@@ -171,7 +192,7 @@ static int tc6393xb_gpio_get(struct gpio_chip *chip,
171 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); 192 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
172 193
173 /* XXX: does dsr also represent inputs? */ 194 /* XXX: does dsr also represent inputs? */
174 return ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)) 195 return tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
175 & TC_GPIO_BIT(offset); 196 & TC_GPIO_BIT(offset);
176} 197}
177 198
@@ -181,13 +202,13 @@ static void __tc6393xb_gpio_set(struct gpio_chip *chip,
181 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio); 202 struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
182 u8 dsr; 203 u8 dsr;
183 204
184 dsr = ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8)); 205 dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
185 if (value) 206 if (value)
186 dsr |= TC_GPIO_BIT(offset); 207 dsr |= TC_GPIO_BIT(offset);
187 else 208 else
188 dsr &= ~TC_GPIO_BIT(offset); 209 dsr &= ~TC_GPIO_BIT(offset);
189 210
190 iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8)); 211 tmio_iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
191} 212}
192 213
193static void tc6393xb_gpio_set(struct gpio_chip *chip, 214static void tc6393xb_gpio_set(struct gpio_chip *chip,
@@ -212,9 +233,9 @@ static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
212 233
213 spin_lock_irqsave(&tc6393xb->lock, flags); 234 spin_lock_irqsave(&tc6393xb->lock, flags);
214 235
215 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); 236 doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
216 doecr &= ~TC_GPIO_BIT(offset); 237 doecr &= ~TC_GPIO_BIT(offset);
217 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); 238 tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
218 239
219 spin_unlock_irqrestore(&tc6393xb->lock, flags); 240 spin_unlock_irqrestore(&tc6393xb->lock, flags);
220 241
@@ -232,9 +253,9 @@ static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
232 253
233 __tc6393xb_gpio_set(chip, offset, value); 254 __tc6393xb_gpio_set(chip, offset, value);
234 255
235 doecr = ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); 256 doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
236 doecr |= TC_GPIO_BIT(offset); 257 doecr |= TC_GPIO_BIT(offset);
237 iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8)); 258 tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
238 259
239 spin_unlock_irqrestore(&tc6393xb->lock, flags); 260 spin_unlock_irqrestore(&tc6393xb->lock, flags);
240 261
@@ -265,8 +286,8 @@ tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
265 286
266 irq_base = tc6393xb->irq_base; 287 irq_base = tc6393xb->irq_base;
267 288
268 while ((isr = ioread8(tc6393xb->scr + SCR_ISR) & 289 while ((isr = tmio_ioread8(tc6393xb->scr + SCR_ISR) &
269 ~ioread8(tc6393xb->scr + SCR_IMR))) 290 ~tmio_ioread8(tc6393xb->scr + SCR_IMR)))
270 for (i = 0; i < TC6393XB_NR_IRQS; i++) { 291 for (i = 0; i < TC6393XB_NR_IRQS; i++) {
271 if (isr & (1 << i)) 292 if (isr & (1 << i))
272 generic_handle_irq(irq_base + i); 293 generic_handle_irq(irq_base + i);
@@ -284,9 +305,9 @@ static void tc6393xb_irq_mask(unsigned int irq)
284 u8 imr; 305 u8 imr;
285 306
286 spin_lock_irqsave(&tc6393xb->lock, flags); 307 spin_lock_irqsave(&tc6393xb->lock, flags);
287 imr = ioread8(tc6393xb->scr + SCR_IMR); 308 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
288 imr |= 1 << (irq - tc6393xb->irq_base); 309 imr |= 1 << (irq - tc6393xb->irq_base);
289 iowrite8(imr, tc6393xb->scr + SCR_IMR); 310 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
290 spin_unlock_irqrestore(&tc6393xb->lock, flags); 311 spin_unlock_irqrestore(&tc6393xb->lock, flags);
291} 312}
292 313
@@ -297,9 +318,9 @@ static void tc6393xb_irq_unmask(unsigned int irq)
297 u8 imr; 318 u8 imr;
298 319
299 spin_lock_irqsave(&tc6393xb->lock, flags); 320 spin_lock_irqsave(&tc6393xb->lock, flags);
300 imr = ioread8(tc6393xb->scr + SCR_IMR); 321 imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
301 imr &= ~(1 << (irq - tc6393xb->irq_base)); 322 imr &= ~(1 << (irq - tc6393xb->irq_base));
302 iowrite8(imr, tc6393xb->scr + SCR_IMR); 323 tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
303 spin_unlock_irqrestore(&tc6393xb->lock, flags); 324 spin_unlock_irqrestore(&tc6393xb->lock, flags);
304} 325}
305 326
@@ -380,9 +401,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
380{ 401{
381 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; 402 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
382 struct tc6393xb *tc6393xb; 403 struct tc6393xb *tc6393xb;
383 struct resource *iomem; 404 struct resource *iomem, *rscr;
384 struct resource *rscr; 405 int ret, temp;
385 int retval, temp;
386 int i; 406 int i;
387 407
388 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0); 408 iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -391,20 +411,26 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
391 411
392 tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL); 412 tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
393 if (!tc6393xb) { 413 if (!tc6393xb) {
394 retval = -ENOMEM; 414 ret = -ENOMEM;
395 goto err_kzalloc; 415 goto err_kzalloc;
396 } 416 }
397 417
398 spin_lock_init(&tc6393xb->lock); 418 spin_lock_init(&tc6393xb->lock);
399 419
400 platform_set_drvdata(dev, tc6393xb); 420 platform_set_drvdata(dev, tc6393xb);
421
422 ret = platform_get_irq(dev, 0);
423 if (ret >= 0)
424 tc6393xb->irq = ret;
425 else
426 goto err_noirq;
427
401 tc6393xb->iomem = iomem; 428 tc6393xb->iomem = iomem;
402 tc6393xb->irq = platform_get_irq(dev, 0);
403 tc6393xb->irq_base = tcpd->irq_base; 429 tc6393xb->irq_base = tcpd->irq_base;
404 430
405 tc6393xb->clk = clk_get(&dev->dev, "GPIO27_CLK" /* "CK3P6MI" */); 431 tc6393xb->clk = clk_get(&dev->dev, "CLK_CK3P6MI");
406 if (IS_ERR(tc6393xb->clk)) { 432 if (IS_ERR(tc6393xb->clk)) {
407 retval = PTR_ERR(tc6393xb->clk); 433 ret = PTR_ERR(tc6393xb->clk);
408 goto err_clk_get; 434 goto err_clk_get;
409 } 435 }
410 436
@@ -414,71 +440,73 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
414 rscr->end = iomem->start + 0xff; 440 rscr->end = iomem->start + 0xff;
415 rscr->flags = IORESOURCE_MEM; 441 rscr->flags = IORESOURCE_MEM;
416 442
417 retval = request_resource(iomem, rscr); 443 ret = request_resource(iomem, rscr);
418 if (retval) 444 if (ret)
419 goto err_request_scr; 445 goto err_request_scr;
420 446
421 tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1); 447 tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
422 if (!tc6393xb->scr) { 448 if (!tc6393xb->scr) {
423 retval = -ENOMEM; 449 ret = -ENOMEM;
424 goto err_ioremap; 450 goto err_ioremap;
425 } 451 }
426 452
427 retval = clk_enable(tc6393xb->clk); 453 ret = clk_enable(tc6393xb->clk);
428 if (retval) 454 if (ret)
429 goto err_clk_enable; 455 goto err_clk_enable;
430 456
431 retval = tcpd->enable(dev); 457 ret = tcpd->enable(dev);
432 if (retval) 458 if (ret)
433 goto err_enable; 459 goto err_enable;
434 460
435 tc6393xb->suspend_state.fer = 0; 461 tc6393xb->suspend_state.fer = 0;
462
436 for (i = 0; i < 3; i++) { 463 for (i = 0; i < 3; i++) {
437 tc6393xb->suspend_state.gpo_dsr[i] = 464 tc6393xb->suspend_state.gpo_dsr[i] =
438 (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff; 465 (tcpd->scr_gpo_dsr >> (8 * i)) & 0xff;
439 tc6393xb->suspend_state.gpo_doecr[i] = 466 tc6393xb->suspend_state.gpo_doecr[i] =
440 (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff; 467 (tcpd->scr_gpo_doecr >> (8 * i)) & 0xff;
441 } 468 }
442 /* 469
443 * It may be necessary to change this back to
444 * platform-dependant code
445 */
446 tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 | 470 tc6393xb->suspend_state.ccr = SCR_CCR_UNK1 |
447 SCR_CCR_HCLK_48; 471 SCR_CCR_HCLK_48;
448 472
449 retval = tc6393xb_hw_init(dev); 473 ret = tc6393xb_hw_init(dev);
450 if (retval) 474 if (ret)
451 goto err_hw_init; 475 goto err_hw_init;
452 476
453 printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n", 477 printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
454 ioread8(tc6393xb->scr + SCR_REVID), 478 tmio_ioread8(tc6393xb->scr + SCR_REVID),
455 (unsigned long) iomem->start, tc6393xb->irq); 479 (unsigned long) iomem->start, tc6393xb->irq);
456 480
457 tc6393xb->gpio.base = -1; 481 tc6393xb->gpio.base = -1;
458 482
459 if (tcpd->gpio_base >= 0) { 483 if (tcpd->gpio_base >= 0) {
460 retval = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base); 484 ret = tc6393xb_register_gpio(tc6393xb, tcpd->gpio_base);
461 if (retval) 485 if (ret)
462 goto err_gpio_add; 486 goto err_gpio_add;
463 } 487 }
464 488
465 if (tc6393xb->irq) 489 tc6393xb_attach_irq(dev);
466 tc6393xb_attach_irq(dev);
467 490
468 tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data; 491 tc6393xb_cells[TC6393XB_CELL_NAND].driver_data = tcpd->nand_data;
469 tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = 492 tc6393xb_cells[TC6393XB_CELL_NAND].platform_data =
470 &tc6393xb_cells[TC6393XB_CELL_NAND]; 493 &tc6393xb_cells[TC6393XB_CELL_NAND];
471 tc6393xb_cells[TC6393XB_CELL_NAND].data_size = 494 tc6393xb_cells[TC6393XB_CELL_NAND].data_size =
472 sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]); 495 sizeof(tc6393xb_cells[TC6393XB_CELL_NAND]);
496 tc6393xb_cells[TC6393XB_CELL_MMC].platform_data =
497 &tc6393xb_cells[TC6393XB_CELL_MMC];
498 tc6393xb_cells[TC6393XB_CELL_MMC].data_size =
499 sizeof(tc6393xb_cells[TC6393XB_CELL_MMC]);
500
473 501
474 retval = mfd_add_devices(&dev->dev, dev->id, 502 ret = mfd_add_devices(&dev->dev, dev->id,
475 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), 503 tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
476 iomem, tcpd->irq_base); 504 iomem, tcpd->irq_base);
477 505
478 return 0; 506 if (!ret)
507 return 0;
479 508
480 if (tc6393xb->irq) 509 tc6393xb_detach_irq(dev);
481 tc6393xb_detach_irq(dev);
482 510
483err_gpio_add: 511err_gpio_add:
484 if (tc6393xb->gpio.base != -1) 512 if (tc6393xb->gpio.base != -1)
@@ -493,10 +521,11 @@ err_ioremap:
493 release_resource(&tc6393xb->rscr); 521 release_resource(&tc6393xb->rscr);
494err_request_scr: 522err_request_scr:
495 clk_put(tc6393xb->clk); 523 clk_put(tc6393xb->clk);
524err_noirq:
496err_clk_get: 525err_clk_get:
497 kfree(tc6393xb); 526 kfree(tc6393xb);
498err_kzalloc: 527err_kzalloc:
499 return retval; 528 return ret;
500} 529}
501 530
502static int __devexit tc6393xb_remove(struct platform_device *dev) 531static int __devexit tc6393xb_remove(struct platform_device *dev)
@@ -506,9 +535,7 @@ static int __devexit tc6393xb_remove(struct platform_device *dev)
506 int ret; 535 int ret;
507 536
508 mfd_remove_devices(&dev->dev); 537 mfd_remove_devices(&dev->dev);
509 538 tc6393xb_detach_irq(dev);
510 if (tc6393xb->irq)
511 tc6393xb_detach_irq(dev);
512 539
513 if (tc6393xb->gpio.base != -1) { 540 if (tc6393xb->gpio.base != -1) {
514 ret = gpiochip_remove(&tc6393xb->gpio); 541 ret = gpiochip_remove(&tc6393xb->gpio);
@@ -519,17 +546,11 @@ static int __devexit tc6393xb_remove(struct platform_device *dev)
519 } 546 }
520 547
521 ret = tcpd->disable(dev); 548 ret = tcpd->disable(dev);
522
523 clk_disable(tc6393xb->clk); 549 clk_disable(tc6393xb->clk);
524
525 iounmap(tc6393xb->scr); 550 iounmap(tc6393xb->scr);
526
527 release_resource(&tc6393xb->rscr); 551 release_resource(&tc6393xb->rscr);
528
529 platform_set_drvdata(dev, NULL); 552 platform_set_drvdata(dev, NULL);
530
531 clk_put(tc6393xb->clk); 553 clk_put(tc6393xb->clk);
532
533 kfree(tc6393xb); 554 kfree(tc6393xb);
534 555
535 return ret; 556 return ret;
@@ -540,8 +561,7 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
540{ 561{
541 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; 562 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
542 struct tc6393xb *tc6393xb = platform_get_drvdata(dev); 563 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
543 int i; 564 int i, ret;
544
545 565
546 tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR); 566 tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
547 tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER); 567 tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
@@ -554,14 +574,21 @@ static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
554 tc6393xb->suspend_state.gpi_bcr[i] = 574 tc6393xb->suspend_state.gpi_bcr[i] =
555 ioread8(tc6393xb->scr + SCR_GPI_BCR(i)); 575 ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
556 } 576 }
577 ret = tcpd->suspend(dev);
578 clk_disable(tc6393xb->clk);
557 579
558 return tcpd->suspend(dev); 580 return ret;
559} 581}
560 582
561static int tc6393xb_resume(struct platform_device *dev) 583static int tc6393xb_resume(struct platform_device *dev)
562{ 584{
563 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data; 585 struct tc6393xb_platform_data *tcpd = dev->dev.platform_data;
564 int ret = tcpd->resume(dev); 586 struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
587 int ret;
588
589 clk_enable(tc6393xb->clk);
590
591 ret = tcpd->resume(dev);
565 592
566 if (ret) 593 if (ret)
567 return ret; 594 return ret;
@@ -598,7 +625,7 @@ static void __exit tc6393xb_exit(void)
598subsys_initcall(tc6393xb_init); 625subsys_initcall(tc6393xb_init);
599module_exit(tc6393xb_exit); 626module_exit(tc6393xb_exit);
600 627
601MODULE_LICENSE("GPL"); 628MODULE_LICENSE("GPL v2");
602MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer"); 629MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
603MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller"); 630MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
604MODULE_ALIAS("platform:tc6393xb"); 631MODULE_ALIAS("platform:tc6393xb");
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
index e7a3fe508dff..b2d9878dc3f0 100644
--- a/drivers/misc/acer-wmi.c
+++ b/drivers/misc/acer-wmi.c
@@ -803,11 +803,30 @@ static acpi_status get_u32(u32 *value, u32 cap)
803 803
804static acpi_status set_u32(u32 value, u32 cap) 804static acpi_status set_u32(u32 value, u32 cap)
805{ 805{
806 acpi_status status;
807
806 if (interface->capability & cap) { 808 if (interface->capability & cap) {
807 switch (interface->type) { 809 switch (interface->type) {
808 case ACER_AMW0: 810 case ACER_AMW0:
809 return AMW0_set_u32(value, cap, interface); 811 return AMW0_set_u32(value, cap, interface);
810 case ACER_AMW0_V2: 812 case ACER_AMW0_V2:
813 if (cap == ACER_CAP_MAILLED)
814 return AMW0_set_u32(value, cap, interface);
815
816 /*
817 * On some models, some WMID methods don't toggle
818 * properly. For those cases, we want to run the AMW0
819 * method afterwards to be certain we've really toggled
820 * the device state.
821 */
822 if (cap == ACER_CAP_WIRELESS ||
823 cap == ACER_CAP_BLUETOOTH) {
824 status = WMID_set_u32(value, cap, interface);
825 if (ACPI_FAILURE(status))
826 return status;
827
828 return AMW0_set_u32(value, cap, interface);
829 }
811 case ACER_WMID: 830 case ACER_WMID:
812 return WMID_set_u32(value, cap, interface); 831 return WMID_set_u32(value, cap, interface);
813 default: 832 default:
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 4251018f70ff..a78f70deeb59 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -279,7 +279,7 @@ struct gru_stats_s {
279#if defined CONFIG_IA64 279#if defined CONFIG_IA64
280#define VADDR_HI_BIT 64 280#define VADDR_HI_BIT 64
281#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) 281#define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3)
282#elif defined __x86_64 282#elif defined CONFIG_X86_64
283#define VADDR_HI_BIT 48 283#define VADDR_HI_BIT 48
284#define GRUREGION(addr) (0) /* ZZZ could do better */ 284#define GRUREGION(addr) (0) /* ZZZ could do better */
285#else 285#else
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index dc6f2579f85c..ea8d7a3490d9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -174,3 +174,9 @@ config MMC_SDRICOH_CS
174 To compile this driver as a module, choose M here: the 174 To compile this driver as a module, choose M here: the
175 module will be called sdricoh_cs. 175 module will be called sdricoh_cs.
176 176
177config MMC_TMIO
178 tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
179 depends on MFD_TMIO
180 help
181 This provides support for the SD/MMC cell found in TC6393XB,
182 T7L66XB and also ipaq ASIC3
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index db52eebfb50e..c794cc5ce442 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -21,4 +21,5 @@ obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
21obj-$(CONFIG_MMC_SPI) += mmc_spi.o 21obj-$(CONFIG_MMC_SPI) += mmc_spi.o
22obj-$(CONFIG_MMC_S3C) += s3cmci.o 22obj-$(CONFIG_MMC_S3C) += s3cmci.o
23obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o 23obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
24obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
24 25
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 7c994e1ae276..ae16d845d746 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -595,8 +595,9 @@ static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
595 return IRQ_HANDLED; 595 return IRQ_HANDLED;
596} 596}
597 597
598void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch, void *buf_id, 598static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
599 int size, enum s3c2410_dma_buffresult result) 599 void *buf_id, int size,
600 enum s3c2410_dma_buffresult result)
600{ 601{
601 struct s3cmci_host *host = buf_id; 602 struct s3cmci_host *host = buf_id;
602 unsigned long iflags; 603 unsigned long iflags;
@@ -740,8 +741,8 @@ request_done:
740 mmc_request_done(host->mmc, mrq); 741 mmc_request_done(host->mmc, mrq);
741} 742}
742 743
743 744static void s3cmci_dma_setup(struct s3cmci_host *host,
744void s3cmci_dma_setup(struct s3cmci_host *host, enum s3c2410_dmasrc source) 745 enum s3c2410_dmasrc source)
745{ 746{
746 static enum s3c2410_dmasrc last_source = -1; 747 static enum s3c2410_dmasrc last_source = -1;
747 static int setup_ok; 748 static int setup_ok;
@@ -1003,8 +1004,9 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1003 enable_irq(host->irq); 1004 enable_irq(host->irq);
1004} 1005}
1005 1006
1006static int s3cmci_card_present(struct s3cmci_host *host) 1007static int s3cmci_card_present(struct mmc_host *mmc)
1007{ 1008{
1009 struct s3cmci_host *host = mmc_priv(mmc);
1008 struct s3c24xx_mci_pdata *pdata = host->pdata; 1010 struct s3c24xx_mci_pdata *pdata = host->pdata;
1009 int ret; 1011 int ret;
1010 1012
@@ -1023,7 +1025,7 @@ static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1023 host->cmd_is_stop = 0; 1025 host->cmd_is_stop = 0;
1024 host->mrq = mrq; 1026 host->mrq = mrq;
1025 1027
1026 if (s3cmci_card_present(host) == 0) { 1028 if (s3cmci_card_present(mmc) == 0) {
1027 dbg(host, dbg_err, "%s: no medium present\n", __func__); 1029 dbg(host, dbg_err, "%s: no medium present\n", __func__);
1028 host->mrq->cmd->error = -ENOMEDIUM; 1030 host->mrq->cmd->error = -ENOMEDIUM;
1029 mmc_request_done(mmc, mrq); 1031 mmc_request_done(mmc, mrq);
@@ -1138,6 +1140,7 @@ static struct mmc_host_ops s3cmci_ops = {
1138 .request = s3cmci_request, 1140 .request = s3cmci_request,
1139 .set_ios = s3cmci_set_ios, 1141 .set_ios = s3cmci_set_ios,
1140 .get_ro = s3cmci_get_ro, 1142 .get_ro = s3cmci_get_ro,
1143 .get_cd = s3cmci_card_present,
1141}; 1144};
1142 1145
1143static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1146static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1206,7 +1209,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1206 } 1209 }
1207 1210
1208 host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1211 host->base = ioremap(host->mem->start, RESSIZE(host->mem));
1209 if (host->base == 0) { 1212 if (!host->base) {
1210 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1213 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1211 ret = -EINVAL; 1214 ret = -EINVAL;
1212 goto probe_free_mem_region; 1215 goto probe_free_mem_region;
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f99e9f721629..1df44d966bdb 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -29,7 +29,6 @@
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/scatterlist.h> 31#include <linux/scatterlist.h>
32#include <linux/version.h>
33 32
34#include <pcmcia/cs_types.h> 33#include <pcmcia/cs_types.h>
35#include <pcmcia/cs.h> 34#include <pcmcia/cs.h>
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
new file mode 100644
index 000000000000..95430b81ec11
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -0,0 +1,691 @@
1/*
2 * linux/drivers/mmc/tmio_mmc.c
3 *
4 * Copyright (C) 2004 Ian Molton
5 * Copyright (C) 2007 Ian Molton
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Driver for the MMC / SD / SDIO cell found in:
12 *
13 * TC6393XB TC6391XB TC6387XB T7L66XB
14 *
15 * This driver draws mainly on scattered spec sheets, Reverse engineering
16 * of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
17 * support). (Further 4 bit support from a later datasheet).
18 *
19 * TODO:
20 * Investigate using a workqueue for PIO transfers
21 * Eliminate FIXMEs
22 * SDIO support
23 * Better Power management
24 * Handle MMC errors better
25 * double buffer support
26 *
27 */
28#include <linux/module.h>
29#include <linux/irq.h>
30#include <linux/device.h>
31#include <linux/delay.h>
32#include <linux/mmc/host.h>
33#include <linux/mfd/core.h>
34#include <linux/mfd/tmio.h>
35
36#include "tmio_mmc.h"
37
38/*
39 * Fixme - documentation conflicts on what the clock values are for the
40 * various dividers.
41 * One document I have says that its a divisor of a 24MHz clock, another 33.
42 * This probably depends on HCLK for a given platform, so we may need to
43 * require HCLK be passed to us from the MFD core.
44 *
45 */
46
47static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
48{
49 void __iomem *cnf = host->cnf;
50 void __iomem *ctl = host->ctl;
51 u32 clk = 0, clock;
52
53 if (new_clock) {
54 for (clock = 46875, clk = 0x100; new_clock >= (clock<<1); ) {
55 clock <<= 1;
56 clk >>= 1;
57 }
58 if (clk & 0x1)
59 clk = 0x20000;
60
61 clk >>= 2;
62 tmio_iowrite8((clk & 0x8000) ? 0 : 1, cnf + CNF_SD_CLK_MODE);
63 clk |= 0x100;
64 }
65
66 tmio_iowrite16(clk, ctl + CTL_SD_CARD_CLK_CTL);
67}
68
69static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
70{
71 void __iomem *ctl = host->ctl;
72
73 tmio_iowrite16(0x0000, ctl + CTL_CLK_AND_WAIT_CTL);
74 msleep(10);
75 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) & ~0x0100,
76 ctl + CTL_SD_CARD_CLK_CTL);
77 msleep(10);
78}
79
80static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
81{
82 void __iomem *ctl = host->ctl;
83
84 tmio_iowrite16(tmio_ioread16(ctl + CTL_SD_CARD_CLK_CTL) | 0x0100,
85 ctl + CTL_SD_CARD_CLK_CTL);
86 msleep(10);
87 tmio_iowrite16(0x0100, ctl + CTL_CLK_AND_WAIT_CTL);
88 msleep(10);
89}
90
91static void reset(struct tmio_mmc_host *host)
92{
93 void __iomem *ctl = host->ctl;
94
95 /* FIXME - should we set stop clock reg here */
96 tmio_iowrite16(0x0000, ctl + CTL_RESET_SD);
97 tmio_iowrite16(0x0000, ctl + CTL_RESET_SDIO);
98 msleep(10);
99 tmio_iowrite16(0x0001, ctl + CTL_RESET_SD);
100 tmio_iowrite16(0x0001, ctl + CTL_RESET_SDIO);
101 msleep(10);
102}
103
104static void
105tmio_mmc_finish_request(struct tmio_mmc_host *host)
106{
107 struct mmc_request *mrq = host->mrq;
108
109 host->mrq = NULL;
110 host->cmd = NULL;
111 host->data = NULL;
112
113 mmc_request_done(host->mmc, mrq);
114}
115
116/* These are the bitmasks the tmio chip requires to implement the MMC response
117 * types. Note that R1 and R6 are the same in this scheme. */
118#define APP_CMD 0x0040
119#define RESP_NONE 0x0300
120#define RESP_R1 0x0400
121#define RESP_R1B 0x0500
122#define RESP_R2 0x0600
123#define RESP_R3 0x0700
124#define DATA_PRESENT 0x0800
125#define TRANSFER_READ 0x1000
126#define TRANSFER_MULTI 0x2000
127#define SECURITY_CMD 0x4000
128
129static int
130tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
131{
132 void __iomem *ctl = host->ctl;
133 struct mmc_data *data = host->data;
134 int c = cmd->opcode;
135
136 /* Command 12 is handled by hardware */
137 if (cmd->opcode == 12 && !cmd->arg) {
138 tmio_iowrite16(0x001, ctl + CTL_STOP_INTERNAL_ACTION);
139 return 0;
140 }
141
142 switch (mmc_resp_type(cmd)) {
143 case MMC_RSP_NONE: c |= RESP_NONE; break;
144 case MMC_RSP_R1: c |= RESP_R1; break;
145 case MMC_RSP_R1B: c |= RESP_R1B; break;
146 case MMC_RSP_R2: c |= RESP_R2; break;
147 case MMC_RSP_R3: c |= RESP_R3; break;
148 default:
149 pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
150 return -EINVAL;
151 }
152
153 host->cmd = cmd;
154
155/* FIXME - this seems to be ok comented out but the spec suggest this bit should
156 * be set when issuing app commands.
157 * if(cmd->flags & MMC_FLAG_ACMD)
158 * c |= APP_CMD;
159 */
160 if (data) {
161 c |= DATA_PRESENT;
162 if (data->blocks > 1) {
163 tmio_iowrite16(0x100, ctl + CTL_STOP_INTERNAL_ACTION);
164 c |= TRANSFER_MULTI;
165 }
166 if (data->flags & MMC_DATA_READ)
167 c |= TRANSFER_READ;
168 }
169
170 enable_mmc_irqs(ctl, TMIO_MASK_CMD);
171
172 /* Fire off the command */
173 tmio_iowrite32(cmd->arg, ctl + CTL_ARG_REG);
174 tmio_iowrite16(c, ctl + CTL_SD_CMD);
175
176 return 0;
177}
178
179/* This chip always returns (at least?) as much data as you ask for.
180 * I'm unsure what happens if you ask for less than a block. This should be
181 * looked into to ensure that a funny length read doesnt hose the controller.
182 *
183 */
184static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
185{
186 void __iomem *ctl = host->ctl;
187 struct mmc_data *data = host->data;
188 unsigned short *buf;
189 unsigned int count;
190 unsigned long flags;
191
192 if (!data) {
193 pr_debug("Spurious PIO IRQ\n");
194 return;
195 }
196
197 buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
198 host->sg_off);
199
200 count = host->sg_ptr->length - host->sg_off;
201 if (count > data->blksz)
202 count = data->blksz;
203
204 pr_debug("count: %08x offset: %08x flags %08x\n",
205 count, host->sg_off, data->flags);
206
207 /* Transfer the data */
208 if (data->flags & MMC_DATA_READ)
209 tmio_ioread16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);
210 else
211 tmio_iowrite16_rep(ctl + CTL_SD_DATA_PORT, buf, count >> 1);
212
213 host->sg_off += count;
214
215 tmio_mmc_kunmap_atomic(host, &flags);
216
217 if (host->sg_off == host->sg_ptr->length)
218 tmio_mmc_next_sg(host);
219
220 return;
221}
222
223static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
224{
225 void __iomem *ctl = host->ctl;
226 struct mmc_data *data = host->data;
227 struct mmc_command *stop = data->stop;
228
229 host->data = NULL;
230
231 if (!data) {
232 pr_debug("Spurious data end IRQ\n");
233 return;
234 }
235
236 /* FIXME - return correct transfer count on errors */
237 if (!data->error)
238 data->bytes_xfered = data->blocks * data->blksz;
239 else
240 data->bytes_xfered = 0;
241
242 pr_debug("Completed data request\n");
243
244 /*FIXME - other drivers allow an optional stop command of any given type
245 * which we dont do, as the chip can auto generate them.
246 * Perhaps we can be smarter about when to use auto CMD12 and
247 * only issue the auto request when we know this is the desired
248 * stop command, allowing fallback to the stop command the
249 * upper layers expect. For now, we do what works.
250 */
251
252 if (data->flags & MMC_DATA_READ)
253 disable_mmc_irqs(ctl, TMIO_MASK_READOP);
254 else
255 disable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);
256
257 if (stop) {
258 if (stop->opcode == 12 && !stop->arg)
259 tmio_iowrite16(0x000, ctl + CTL_STOP_INTERNAL_ACTION);
260 else
261 BUG();
262 }
263
264 tmio_mmc_finish_request(host);
265}
266
267static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
268 unsigned int stat)
269{
270 void __iomem *ctl = host->ctl, *addr;
271 struct mmc_command *cmd = host->cmd;
272 int i;
273
274 if (!host->cmd) {
275 pr_debug("Spurious CMD irq\n");
276 return;
277 }
278
279 host->cmd = NULL;
280
281 /* This controller is sicker than the PXA one. Not only do we need to
282 * drop the top 8 bits of the first response word, we also need to
283 * modify the order of the response for short response command types.
284 */
285
286 for (i = 3, addr = ctl + CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
287 cmd->resp[i] = tmio_ioread32(addr);
288
289 if (cmd->flags & MMC_RSP_136) {
290 cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
291 cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
292 cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
293 cmd->resp[3] <<= 8;
294 } else if (cmd->flags & MMC_RSP_R3) {
295 cmd->resp[0] = cmd->resp[3];
296 }
297
298 if (stat & TMIO_STAT_CMDTIMEOUT)
299 cmd->error = -ETIMEDOUT;
300 else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
301 cmd->error = -EILSEQ;
302
303 /* If there is data to handle we enable data IRQs here, and
304 * we will ultimatley finish the request in the data_end handler.
305 * If theres no data or we encountered an error, finish now.
306 */
307 if (host->data && !cmd->error) {
308 if (host->data->flags & MMC_DATA_READ)
309 enable_mmc_irqs(ctl, TMIO_MASK_READOP);
310 else
311 enable_mmc_irqs(ctl, TMIO_MASK_WRITEOP);
312 } else {
313 tmio_mmc_finish_request(host);
314 }
315
316 return;
317}
318
319
320static irqreturn_t tmio_mmc_irq(int irq, void *devid)
321{
322 struct tmio_mmc_host *host = devid;
323 void __iomem *ctl = host->ctl;
324 unsigned int ireg, irq_mask, status;
325
326 pr_debug("MMC IRQ begin\n");
327
328 status = tmio_ioread32(ctl + CTL_STATUS);
329 irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
330 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
331
332 pr_debug_status(status);
333 pr_debug_status(ireg);
334
335 if (!ireg) {
336 disable_mmc_irqs(ctl, status & ~irq_mask);
337
338 pr_debug("tmio_mmc: Spurious irq, disabling! "
339 "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
340 pr_debug_status(status);
341
342 goto out;
343 }
344
345 while (ireg) {
346 /* Card insert / remove attempts */
347 if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
348 ack_mmc_irqs(ctl, TMIO_STAT_CARD_INSERT |
349 TMIO_STAT_CARD_REMOVE);
350 mmc_detect_change(host->mmc, 0);
351 }
352
353 /* CRC and other errors */
354/* if (ireg & TMIO_STAT_ERR_IRQ)
355 * handled |= tmio_error_irq(host, irq, stat);
356 */
357
358 /* Command completion */
359 if (ireg & TMIO_MASK_CMD) {
360 ack_mmc_irqs(ctl, TMIO_MASK_CMD);
361 tmio_mmc_cmd_irq(host, status);
362 }
363
364 /* Data transfer */
365 if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
366 ack_mmc_irqs(ctl, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
367 tmio_mmc_pio_irq(host);
368 }
369
370 /* Data transfer completion */
371 if (ireg & TMIO_STAT_DATAEND) {
372 ack_mmc_irqs(ctl, TMIO_STAT_DATAEND);
373 tmio_mmc_data_irq(host);
374 }
375
376 /* Check status - keep going until we've handled it all */
377 status = tmio_ioread32(ctl + CTL_STATUS);
378 irq_mask = tmio_ioread32(ctl + CTL_IRQ_MASK);
379 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
380
381 pr_debug("Status at end of loop: %08x\n", status);
382 pr_debug_status(status);
383 }
384 pr_debug("MMC IRQ end\n");
385
386out:
387 return IRQ_HANDLED;
388}
389
390static int tmio_mmc_start_data(struct tmio_mmc_host *host,
391 struct mmc_data *data)
392{
393 void __iomem *ctl = host->ctl;
394
395 pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
396 data->blksz, data->blocks);
397
398 /* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
399 if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
400 printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
401 mmc_hostname(host->mmc), data->blksz);
402 return -EINVAL;
403 }
404
405 tmio_mmc_init_sg(host, data);
406 host->data = data;
407
408 /* Set transfer length / blocksize */
409 tmio_iowrite16(data->blksz, ctl + CTL_SD_XFER_LEN);
410 tmio_iowrite16(data->blocks, ctl + CTL_XFER_BLK_COUNT);
411
412 return 0;
413}
414
415/* Process requests from the MMC layer */
416static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
417{
418 struct tmio_mmc_host *host = mmc_priv(mmc);
419 int ret;
420
421 if (host->mrq)
422 pr_debug("request not null\n");
423
424 host->mrq = mrq;
425
426 if (mrq->data) {
427 ret = tmio_mmc_start_data(host, mrq->data);
428 if (ret)
429 goto fail;
430 }
431
432 ret = tmio_mmc_start_command(host, mrq->cmd);
433
434 if (!ret)
435 return;
436
437fail:
438 mrq->cmd->error = ret;
439 mmc_request_done(mmc, mrq);
440}
441
442/* Set MMC clock / power.
443 * Note: This controller uses a simple divider scheme therefore it cannot
444 * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
445 * MMC wont run that fast, it has to be clocked at 12MHz which is the next
446 * slowest setting.
447 */
448static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
449{
450 struct tmio_mmc_host *host = mmc_priv(mmc);
451 void __iomem *cnf = host->cnf;
452 void __iomem *ctl = host->ctl;
453
454 if (ios->clock)
455 tmio_mmc_set_clock(host, ios->clock);
456
457 /* Power sequence - OFF -> ON -> UP */
458 switch (ios->power_mode) {
459 case MMC_POWER_OFF: /* power down SD bus */
460 tmio_iowrite8(0x00, cnf + CNF_PWR_CTL_2);
461 tmio_mmc_clk_stop(host);
462 break;
463 case MMC_POWER_ON: /* power up SD bus */
464
465 tmio_iowrite8(0x02, cnf + CNF_PWR_CTL_2);
466 break;
467 case MMC_POWER_UP: /* start bus clock */
468 tmio_mmc_clk_start(host);
469 break;
470 }
471
472 switch (ios->bus_width) {
473 case MMC_BUS_WIDTH_1:
474 tmio_iowrite16(0x80e0, ctl + CTL_SD_MEM_CARD_OPT);
475 break;
476 case MMC_BUS_WIDTH_4:
477 tmio_iowrite16(0x00e0, ctl + CTL_SD_MEM_CARD_OPT);
478 break;
479 }
480
481 /* Let things settle. delay taken from winCE driver */
482 udelay(140);
483}
484
485static int tmio_mmc_get_ro(struct mmc_host *mmc)
486{
487 struct tmio_mmc_host *host = mmc_priv(mmc);
488 void __iomem *ctl = host->ctl;
489
490 return (tmio_ioread16(ctl + CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
491}
492
493static struct mmc_host_ops tmio_mmc_ops = {
494 .request = tmio_mmc_request,
495 .set_ios = tmio_mmc_set_ios,
496 .get_ro = tmio_mmc_get_ro,
497};
498
#ifdef CONFIG_PM
/* Suspend: quiesce the card via the MMC core, then let the MFD core
 * power down our cell. */
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	int ret;

	ret = mmc_suspend_host(mmc, state);

	/* Tell MFD core it can disable us now. */
	if (!ret && cell->disable)
		cell->disable(dev);

	return ret;
}

/* Resume: re-enable the control register window, re-enable the cell
 * via the MFD core, then let the MMC core resume the card. */
static int tmio_mmc_resume(struct platform_device *dev)
{
	struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
	struct mmc_host *mmc = platform_get_drvdata(dev);
	struct tmio_mmc_host *host = mmc_priv(mmc);
	void __iomem *cnf = host->cnf;
	int ret = 0;

	/* Enable the MMC/SD Control registers */
	tmio_iowrite16(SDCREN, cnf + CNF_CMD);
	tmio_iowrite32(dev->resource[0].start & 0xfffe, cnf + CNF_CTL_BASE);

	/* Tell the MFD core we are ready to be enabled */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			return ret;
	}

	mmc_resume_host(mmc);

	return ret;
}
#else
#define tmio_mmc_suspend NULL
#define tmio_mmc_resume NULL
#endif
543
544static int __devinit tmio_mmc_probe(struct platform_device *dev)
545{
546 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
547 struct resource *res_ctl, *res_cnf;
548 struct tmio_mmc_host *host;
549 struct mmc_host *mmc;
550 int ret = -ENOMEM;
551
552 if (dev->num_resources != 3)
553 goto out;
554
555 res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
556 res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
557 if (!res_ctl || !res_cnf) {
558 ret = -EINVAL;
559 goto out;
560 }
561
562 mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
563 if (!mmc)
564 goto out;
565
566 host = mmc_priv(mmc);
567 host->mmc = mmc;
568 platform_set_drvdata(dev, mmc);
569
570 host->ctl = ioremap(res_ctl->start, res_ctl->end - res_ctl->start);
571 if (!host->ctl)
572 goto host_free;
573
574 host->cnf = ioremap(res_cnf->start, res_cnf->end - res_cnf->start);
575 if (!host->cnf)
576 goto unmap_ctl;
577
578 mmc->ops = &tmio_mmc_ops;
579 mmc->caps = MMC_CAP_4_BIT_DATA;
580 mmc->f_min = 46875; /* 24000000 / 512 */
581 mmc->f_max = 24000000;
582 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
583
584 /* Enable the MMC/SD Control registers */
585 tmio_iowrite16(SDCREN, host->cnf + CNF_CMD);
586 tmio_iowrite32(dev->resource[0].start & 0xfffe,
587 host->cnf + CNF_CTL_BASE);
588
589 /* Tell the MFD core we are ready to be enabled */
590 if (cell->enable) {
591 ret = cell->enable(dev);
592 if (ret)
593 goto unmap_cnf;
594 }
595
596 /* Disable SD power during suspend */
597 tmio_iowrite8(0x01, host->cnf + CNF_PWR_CTL_3);
598
599 /* The below is required but why? FIXME */
600 tmio_iowrite8(0x1f, host->cnf + CNF_STOP_CLK_CTL);
601
602 /* Power down SD bus*/
603 tmio_iowrite8(0x0, host->cnf + CNF_PWR_CTL_2);
604
605 tmio_mmc_clk_stop(host);
606 reset(host);
607
608 ret = platform_get_irq(dev, 0);
609 if (ret >= 0)
610 host->irq = ret;
611 else
612 goto unmap_cnf;
613
614 disable_mmc_irqs(host->ctl, TMIO_MASK_ALL);
615
616 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED, "tmio-mmc",
617 host);
618 if (ret)
619 goto unmap_cnf;
620
621 set_irq_type(host->irq, IRQ_TYPE_EDGE_FALLING);
622
623 mmc_add_host(mmc);
624
625 printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
626 (unsigned long)host->ctl, host->irq);
627
628 /* Unmask the IRQs we want to know about */
629 enable_mmc_irqs(host->ctl, TMIO_MASK_IRQ);
630
631 return 0;
632
633unmap_cnf:
634 iounmap(host->cnf);
635unmap_ctl:
636 iounmap(host->ctl);
637host_free:
638 mmc_free_host(mmc);
639out:
640 return ret;
641}
642
643static int __devexit tmio_mmc_remove(struct platform_device *dev)
644{
645 struct mmc_host *mmc = platform_get_drvdata(dev);
646
647 platform_set_drvdata(dev, NULL);
648
649 if (mmc) {
650 struct tmio_mmc_host *host = mmc_priv(mmc);
651 mmc_remove_host(mmc);
652 mmc_free_host(mmc);
653 free_irq(host->irq, host);
654 iounmap(host->ctl);
655 iounmap(host->cnf);
656 }
657
658 return 0;
659}
660
661/* ------------------- device registration ----------------------- */
662
663static struct platform_driver tmio_mmc_driver = {
664 .driver = {
665 .name = "tmio-mmc",
666 .owner = THIS_MODULE,
667 },
668 .probe = tmio_mmc_probe,
669 .remove = __devexit_p(tmio_mmc_remove),
670 .suspend = tmio_mmc_suspend,
671 .resume = tmio_mmc_resume,
672};
673
674
675static int __init tmio_mmc_init(void)
676{
677 return platform_driver_register(&tmio_mmc_driver);
678}
679
680static void __exit tmio_mmc_exit(void)
681{
682 platform_driver_unregister(&tmio_mmc_driver);
683}
684
685module_init(tmio_mmc_init);
686module_exit(tmio_mmc_exit);
687
688MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
689MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
690MODULE_LICENSE("GPL v2");
691MODULE_ALIAS("platform:tmio-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
new file mode 100644
index 000000000000..9e647a06054f
--- /dev/null
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -0,0 +1,194 @@
/* Definitions for use with tmio_mmc.c
2 *
3 * (c) 2004 Ian Molton <spyro@f2s.com>
4 * (c) 2007 Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#define CNF_CMD 0x04
12#define CNF_CTL_BASE 0x10
13#define CNF_INT_PIN 0x3d
14#define CNF_STOP_CLK_CTL 0x40
15#define CNF_GCLK_CTL 0x41
16#define CNF_SD_CLK_MODE 0x42
17#define CNF_PIN_STATUS 0x44
18#define CNF_PWR_CTL_1 0x48
19#define CNF_PWR_CTL_2 0x49
20#define CNF_PWR_CTL_3 0x4a
21#define CNF_CARD_DETECT_MODE 0x4c
22#define CNF_SD_SLOT 0x50
23#define CNF_EXT_GCLK_CTL_1 0xf0
24#define CNF_EXT_GCLK_CTL_2 0xf1
25#define CNF_EXT_GCLK_CTL_3 0xf9
26#define CNF_SD_LED_EN_1 0xfa
27#define CNF_SD_LED_EN_2 0xfe
28
29#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
30
31#define CTL_SD_CMD 0x00
32#define CTL_ARG_REG 0x04
33#define CTL_STOP_INTERNAL_ACTION 0x08
34#define CTL_XFER_BLK_COUNT 0xa
35#define CTL_RESPONSE 0x0c
36#define CTL_STATUS 0x1c
37#define CTL_IRQ_MASK 0x20
38#define CTL_SD_CARD_CLK_CTL 0x24
39#define CTL_SD_XFER_LEN 0x26
40#define CTL_SD_MEM_CARD_OPT 0x28
41#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
42#define CTL_SD_DATA_PORT 0x30
43#define CTL_TRANSACTION_CTL 0x34
44#define CTL_RESET_SD 0xe0
45#define CTL_SDIO_REGS 0x100
46#define CTL_CLK_AND_WAIT_CTL 0x138
47#define CTL_RESET_SDIO 0x1e0
48
49/* Definitions for values the CTRL_STATUS register can take. */
50#define TMIO_STAT_CMDRESPEND 0x00000001
51#define TMIO_STAT_DATAEND 0x00000004
52#define TMIO_STAT_CARD_REMOVE 0x00000008
53#define TMIO_STAT_CARD_INSERT 0x00000010
54#define TMIO_STAT_SIGSTATE 0x00000020
55#define TMIO_STAT_WRPROTECT 0x00000080
56#define TMIO_STAT_CARD_REMOVE_A 0x00000100
57#define TMIO_STAT_CARD_INSERT_A 0x00000200
58#define TMIO_STAT_SIGSTATE_A 0x00000400
59#define TMIO_STAT_CMD_IDX_ERR 0x00010000
60#define TMIO_STAT_CRCFAIL 0x00020000
61#define TMIO_STAT_STOPBIT_ERR 0x00040000
62#define TMIO_STAT_DATATIMEOUT 0x00080000
63#define TMIO_STAT_RXOVERFLOW 0x00100000
64#define TMIO_STAT_TXUNDERRUN 0x00200000
65#define TMIO_STAT_CMDTIMEOUT 0x00400000
66#define TMIO_STAT_RXRDY 0x01000000
67#define TMIO_STAT_TXRQ 0x02000000
68#define TMIO_STAT_ILL_FUNC 0x20000000
69#define TMIO_STAT_CMD_BUSY 0x40000000
70#define TMIO_STAT_ILL_ACCESS 0x80000000
71
72/* Define some IRQ masks */
73/* This is the mask used at reset by the chip */
74#define TMIO_MASK_ALL 0x837f031d
75#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND | \
76 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
77#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND | \
78 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
79#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
80 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
81#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
82
83#define enable_mmc_irqs(ctl, i) \
84 do { \
85 u32 mask;\
86 mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \
87 mask &= ~((i) & TMIO_MASK_IRQ); \
88 tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \
89 } while (0)
90
91#define disable_mmc_irqs(ctl, i) \
92 do { \
93 u32 mask;\
94 mask = tmio_ioread32((ctl) + CTL_IRQ_MASK); \
95 mask |= ((i) & TMIO_MASK_IRQ); \
96 tmio_iowrite32(mask, (ctl) + CTL_IRQ_MASK); \
97 } while (0)
98
99#define ack_mmc_irqs(ctl, i) \
100 do { \
101 u32 mask;\
102 mask = tmio_ioread32((ctl) + CTL_STATUS); \
103 mask &= ~((i) & TMIO_MASK_IRQ); \
104 tmio_iowrite32(mask, (ctl) + CTL_STATUS); \
105 } while (0)
106
107
108struct tmio_mmc_host {
109 void __iomem *cnf;
110 void __iomem *ctl;
111 struct mmc_command *cmd;
112 struct mmc_request *mrq;
113 struct mmc_data *data;
114 struct mmc_host *mmc;
115 int irq;
116
117 /* pio related stuff */
118 struct scatterlist *sg_ptr;
119 unsigned int sg_len;
120 unsigned int sg_off;
121};
122
123#include <linux/scatterlist.h>
124#include <linux/blkdev.h>
125
126static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
127 struct mmc_data *data)
128{
129 host->sg_len = data->sg_len;
130 host->sg_ptr = data->sg;
131 host->sg_off = 0;
132}
133
134static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
135{
136 host->sg_ptr = sg_next(host->sg_ptr);
137 host->sg_off = 0;
138 return --host->sg_len;
139}
140
141static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
142 unsigned long *flags)
143{
144 struct scatterlist *sg = host->sg_ptr;
145
146 local_irq_save(*flags);
147 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
148}
149
150static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
151 unsigned long *flags)
152{
153 kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
154 local_irq_restore(*flags);
155}
156
#ifdef CONFIG_MMC_DEBUG

/* Print the bit's name if it is set in 'status' (in scope at use). */
#define STATUS_TO_TEXT(a) \
	do { \
		if (status & TMIO_STAT_##a) \
			printk(#a); \
	} while (0)

/*
 * Decode and print a CTL_STATUS value.  Named pr_debug_status() because
 * that is the symbol tmio_mmc.c calls; the original defined
 * debug_status() and used printf(), so CONFIG_MMC_DEBUG builds could
 * not compile or link.
 */
static void pr_debug_status(u32 status)
{
	printk(KERN_DEBUG "status: %08x = ", status);
	STATUS_TO_TEXT(CARD_REMOVE);
	STATUS_TO_TEXT(CARD_INSERT);
	STATUS_TO_TEXT(SIGSTATE);
	STATUS_TO_TEXT(WRPROTECT);
	STATUS_TO_TEXT(CARD_REMOVE_A);
	STATUS_TO_TEXT(CARD_INSERT_A);
	STATUS_TO_TEXT(SIGSTATE_A);
	STATUS_TO_TEXT(CMD_IDX_ERR);
	STATUS_TO_TEXT(STOPBIT_ERR);
	STATUS_TO_TEXT(ILL_FUNC);
	STATUS_TO_TEXT(CMD_BUSY);
	STATUS_TO_TEXT(CMDRESPEND);
	STATUS_TO_TEXT(DATAEND);
	STATUS_TO_TEXT(CRCFAIL);
	STATUS_TO_TEXT(DATATIMEOUT);
	STATUS_TO_TEXT(CMDTIMEOUT);
	STATUS_TO_TEXT(RXOVERFLOW);
	STATUS_TO_TEXT(TXUNDERRUN);
	STATUS_TO_TEXT(RXRDY);
	STATUS_TO_TEXT(TXRQ);
	STATUS_TO_TEXT(ILL_ACCESS);
	printk("\n");
}

#else
#define pr_debug_status(s)  do { } while (0)
#endif
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 02f9cc30d77b..41f361c49b32 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -351,6 +351,13 @@ config MTD_NAND_PASEMI
351 Enables support for NAND Flash interface on PA Semi PWRficient 351 Enables support for NAND Flash interface on PA Semi PWRficient
352 based boards 352 based boards
353 353
354config MTD_NAND_TMIO
355 tristate "NAND Flash device on Toshiba Mobile IO Controller"
356 depends on MTD_NAND && MFD_TMIO
357 help
358 Support for NAND flash connected to a Toshiba Mobile IO
359 Controller in some PDAs, including the Sharp SL6000x.
360
354config MTD_NAND_NANDSIM 361config MTD_NAND_NANDSIM
355 tristate "Support for NAND Flash Simulator" 362 tristate "Support for NAND Flash Simulator"
356 depends on MTD_PARTITIONS 363 depends on MTD_PARTITIONS
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d772581de573..b786c5da82da 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
29obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 29obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
30obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
30obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o 31obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
31obj-$(CONFIG_MTD_ALAUDA) += alauda.o 32obj-$(CONFIG_MTD_ALAUDA) += alauda.o
32obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o 33obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 64002488c6ee..917cf8d3ae95 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -19,7 +19,7 @@
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/sizes.h> 20#include <asm/sizes.h>
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <asm/plat-orion/orion_nand.h> 22#include <plat/orion_nand.h>
23 23
24#ifdef CONFIG_MTD_CMDLINE_PARTS 24#ifdef CONFIG_MTD_CMDLINE_PARTS
25static const char *part_probes[] = { "cmdlinepart", NULL }; 25static const char *part_probes[] = { "cmdlinepart", NULL };
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
new file mode 100644
index 000000000000..cbab654b03c8
--- /dev/null
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -0,0 +1,556 @@
1/*
2 * Toshiba TMIO NAND flash controller driver
3 *
4 * Slightly murky pre-git history of the driver:
5 *
6 * Copyright (c) Ian Molton 2004, 2005, 2008
 7 * Original work, independent of Sharp's code. Included hardware ECC support.
8 * Hard ECC did not work for writes in the early revisions.
9 * Copyright (c) Dirk Opfer 2005.
10 * Modifications developed from sharps code but
11 * NOT containing any, ported onto Ians base.
12 * Copyright (c) Chris Humbert 2005
13 * Copyright (c) Dmitry Baryshkov 2008
14 * Minor fixes
15 *
16 * Parts copyright Sebastian Carlier
17 *
18 * This file is licensed under
19 * the terms of the GNU General Public License version 2. This program
20 * is licensed "as is" without any warranty of any kind, whether express
21 * or implied.
22 *
23 */
24
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/mfd/core.h>
30#include <linux/mfd/tmio.h>
31#include <linux/delay.h>
32#include <linux/io.h>
33#include <linux/irq.h>
34#include <linux/interrupt.h>
35#include <linux/ioport.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nand.h>
38#include <linux/mtd/nand_ecc.h>
39#include <linux/mtd/partitions.h>
40
41/*--------------------------------------------------------------------------*/
42
43/*
44 * NAND Flash Host Controller Configuration Register
45 */
46#define CCR_COMMAND 0x04 /* w Command */
47#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */
48#define CCR_INTP 0x3d /* b Interrupt Pin */
49#define CCR_INTE 0x48 /* b Interrupt Enable */
50#define CCR_EC 0x4a /* b Event Control */
51#define CCR_ICC 0x4c /* b Internal Clock Control */
52#define CCR_ECCC 0x5b /* b ECC Control */
53#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */
54#define CCR_NFM 0x61 /* b NAND Flash Monitor */
55#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */
56#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */
57
58/*
59 * NAND Flash Control Register
60 */
61#define FCR_DATA 0x00 /* bwl Data Register */
62#define FCR_MODE 0x04 /* b Mode Register */
63#define FCR_STATUS 0x05 /* b Status Register */
64#define FCR_ISR 0x06 /* b Interrupt Status Register */
65#define FCR_IMR 0x07 /* b Interrupt Mask Register */
66
67/* FCR_MODE Register Command List */
68#define FCR_MODE_DATA 0x94 /* Data Data_Mode */
69#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */
70#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */
71
72#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */
73#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */
74#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */
75
76#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */
77#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */
78
79#define FCR_MODE_LED_OFF 0x00 /* LED OFF */
80#define FCR_MODE_LED_ON 0x04 /* LED ON */
81
82#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */
83#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */
84
85#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */
86#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. Eject Switch is valid */
87
88#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */
89#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */
90
91#define FCR_MODE_WE 0x80
92#define FCR_MODE_ECC1 0x40
93#define FCR_MODE_ECC0 0x20
94#define FCR_MODE_CE 0x10
95#define FCR_MODE_PCNT1 0x08
96#define FCR_MODE_PCNT0 0x04
97#define FCR_MODE_ALE 0x02
98#define FCR_MODE_CLE 0x01
99
100#define FCR_STATUS_BUSY 0x80
101
102/*--------------------------------------------------------------------------*/
103
104struct tmio_nand {
105 struct mtd_info mtd;
106 struct nand_chip chip;
107
108 struct platform_device *dev;
109
110 void __iomem *ccr;
111 void __iomem *fcr;
112 unsigned long fcr_phys;
113
114 unsigned int irq;
115
116 /* for tmio_nand_read_byte */
117 u8 read;
118 unsigned read_good:1;
119};
120
121#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
122
123#ifdef CONFIG_MTD_CMDLINE_PARTS
124static const char *part_probes[] = { "cmdlinepart", NULL };
125#endif
126
127/*--------------------------------------------------------------------------*/
128
129static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
130 unsigned int ctrl)
131{
132 struct tmio_nand *tmio = mtd_to_tmio(mtd);
133 struct nand_chip *chip = mtd->priv;
134
135 if (ctrl & NAND_CTRL_CHANGE) {
136 u8 mode;
137
138 if (ctrl & NAND_NCE) {
139 mode = FCR_MODE_DATA;
140
141 if (ctrl & NAND_CLE)
142 mode |= FCR_MODE_CLE;
143 else
144 mode &= ~FCR_MODE_CLE;
145
146 if (ctrl & NAND_ALE)
147 mode |= FCR_MODE_ALE;
148 else
149 mode &= ~FCR_MODE_ALE;
150 } else {
151 mode = FCR_MODE_STANDBY;
152 }
153
154 tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
155 tmio->read_good = 0;
156 }
157
158 if (cmd != NAND_CMD_NONE)
159 tmio_iowrite8(cmd, chip->IO_ADDR_W);
160}
161
162static int tmio_nand_dev_ready(struct mtd_info *mtd)
163{
164 struct tmio_nand *tmio = mtd_to_tmio(mtd);
165
166 return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
167}
168
169static irqreturn_t tmio_irq(int irq, void *__tmio)
170{
171 struct tmio_nand *tmio = __tmio;
172 struct nand_chip *nand_chip = &tmio->chip;
173
174 /* disable RDYREQ interrupt */
175 tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
176
177 if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
178 dev_warn(&tmio->dev->dev, "spurious interrupt\n");
179
180 wake_up(&nand_chip->controller->wq);
181 return IRQ_HANDLED;
182}
183
184/*
185 *The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
186 *This interrupt is normally disabled, but for long operations like
187 *erase and write, we enable it to wake us up. The irq handler
188 *disables the interrupt.
189 */
190static int
191tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
192{
193 struct tmio_nand *tmio = mtd_to_tmio(mtd);
194 long timeout;
195
196 /* enable RDYREQ interrupt */
197 tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
198 tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
199
200 timeout = wait_event_timeout(nand_chip->controller->wq,
201 tmio_nand_dev_ready(mtd),
202 msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
203
204 if (unlikely(!tmio_nand_dev_ready(mtd))) {
205 tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
206 dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
207 nand_chip->state == FL_ERASING ? "erase" : "program",
208 nand_chip->state == FL_ERASING ? 400 : 20);
209
210 } else if (unlikely(!timeout)) {
211 tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
212 dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
213 }
214
215 nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
216 return nand_chip->read_byte(mtd);
217}
218
219/*
220 *The TMIO controller combines two 8-bit data bytes into one 16-bit
221 *word. This function separates them so nand_base.c works as expected,
222 *especially its NAND_CMD_READID routines.
223 *
224 *To prevent stale data from being read, tmio_nand_hwcontrol() clears
225 *tmio->read_good.
226 */
227static u_char tmio_nand_read_byte(struct mtd_info *mtd)
228{
229 struct tmio_nand *tmio = mtd_to_tmio(mtd);
230 unsigned int data;
231
232 if (tmio->read_good--)
233 return tmio->read;
234
235 data = tmio_ioread16(tmio->fcr + FCR_DATA);
236 tmio->read = data >> 8;
237 return data;
238}
239
240/*
241 *The TMIO controller converts an 8-bit NAND interface to a 16-bit
242 *bus interface, so all data reads and writes must be 16-bit wide.
243 *Thus, we implement 16-bit versions of the read, write, and verify
244 *buffer functions.
245 */
246static void
247tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
248{
249 struct tmio_nand *tmio = mtd_to_tmio(mtd);
250
251 tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
252}
253
254static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
255{
256 struct tmio_nand *tmio = mtd_to_tmio(mtd);
257
258 tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
259}
260
261static int
262tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
263{
264 struct tmio_nand *tmio = mtd_to_tmio(mtd);
265 u16 *p = (u16 *) buf;
266
267 for (len >>= 1; len; len--)
268 if (*(p++) != tmio_ioread16(tmio->fcr + FCR_DATA))
269 return -EFAULT;
270 return 0;
271}
272
273static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
274{
275 struct tmio_nand *tmio = mtd_to_tmio(mtd);
276
277 tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
278 tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */
279 tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
280}
281
282static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
283 u_char *ecc_code)
284{
285 struct tmio_nand *tmio = mtd_to_tmio(mtd);
286 unsigned int ecc;
287
288 tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
289
290 ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
291 ecc_code[1] = ecc; /* 000-255 LP7-0 */
292 ecc_code[0] = ecc >> 8; /* 000-255 LP15-8 */
293 ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
294 ecc_code[2] = ecc; /* 000-255 CP5-0,11b */
295 ecc_code[4] = ecc >> 8; /* 256-511 LP7-0 */
296 ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
297 ecc_code[3] = ecc; /* 256-511 LP15-8 */
298 ecc_code[5] = ecc >> 8; /* 256-511 CP5-0,11b */
299
300 tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
301 return 0;
302}
303
304static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
305{
306 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
307 int ret;
308
309 if (cell->enable) {
310 ret = cell->enable(dev);
311 if (ret)
312 return ret;
313 }
314
315 /* (4Ch) CLKRUN Enable 1st spcrunc */
316 tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
317
318 /* (10h)BaseAddress 0x1000 spba.spba2 */
319 tmio_iowrite16(tmio->fcr_phys, tmio->ccr + CCR_BASE);
320 tmio_iowrite16(tmio->fcr_phys >> 16, tmio->ccr + CCR_BASE + 16);
321
322 /* (04h)Command Register I/O spcmd */
323 tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
324
325 /* (62h) Power Supply Control ssmpwc */
326 /* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
327 tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
328
329 /* (63h) Detect Control ssmdtc */
330 tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
331
332 /* Interrupt status register clear sintst */
333 tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
334
335 /* After power supply, Media are reset smode */
336 tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
337 tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
338 tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
339
340 /* Standby Mode smode */
341 tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
342
343 mdelay(5);
344
345 return 0;
346}
347
348static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
349{
350 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
351
352 tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
353 if (cell->disable)
354 cell->disable(dev);
355}
356
357static int tmio_probe(struct platform_device *dev)
358{
359 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
360 struct tmio_nand_data *data = cell->driver_data;
361 struct resource *fcr = platform_get_resource(dev,
362 IORESOURCE_MEM, 0);
363 struct resource *ccr = platform_get_resource(dev,
364 IORESOURCE_MEM, 1);
365 int irq = platform_get_irq(dev, 0);
366 struct tmio_nand *tmio;
367 struct mtd_info *mtd;
368 struct nand_chip *nand_chip;
369#ifdef CONFIG_MTD_PARTITIONS
370 struct mtd_partition *parts;
371 int nbparts = 0;
372#endif
373 int retval;
374
375 if (data == NULL)
376 dev_warn(&dev->dev, "NULL platform data!\n");
377
378 tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
379 if (!tmio) {
380 retval = -ENOMEM;
381 goto err_kzalloc;
382 }
383
384 tmio->dev = dev;
385
386 platform_set_drvdata(dev, tmio);
387 mtd = &tmio->mtd;
388 nand_chip = &tmio->chip;
389 mtd->priv = nand_chip;
390 mtd->name = "tmio-nand";
391
392 tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
393 if (!tmio->ccr) {
394 retval = -EIO;
395 goto err_iomap_ccr;
396 }
397
398 tmio->fcr_phys = (unsigned long)fcr->start;
399 tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
400 if (!tmio->fcr) {
401 retval = -EIO;
402 goto err_iomap_fcr;
403 }
404
405 retval = tmio_hw_init(dev, tmio);
406 if (retval)
407 goto err_hwinit;
408
409 /* Set address of NAND IO lines */
410 nand_chip->IO_ADDR_R = tmio->fcr;
411 nand_chip->IO_ADDR_W = tmio->fcr;
412
413 /* Set address of hardware control function */
414 nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
415 nand_chip->dev_ready = tmio_nand_dev_ready;
416 nand_chip->read_byte = tmio_nand_read_byte;
417 nand_chip->write_buf = tmio_nand_write_buf;
418 nand_chip->read_buf = tmio_nand_read_buf;
419 nand_chip->verify_buf = tmio_nand_verify_buf;
420
421 /* set eccmode using hardware ECC */
422 nand_chip->ecc.mode = NAND_ECC_HW;
423 nand_chip->ecc.size = 512;
424 nand_chip->ecc.bytes = 6;
425 nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
426 nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
427 nand_chip->ecc.correct = nand_correct_data;
428
429 if (data)
430 nand_chip->badblock_pattern = data->badblock_pattern;
431
432 /* 15 us command delay time */
433 nand_chip->chip_delay = 15;
434
435 retval = request_irq(irq, &tmio_irq,
436 IRQF_DISABLED, dev->dev.bus_id, tmio);
437 if (retval) {
438 dev_err(&dev->dev, "request_irq error %d\n", retval);
439 goto err_irq;
440 }
441
442 tmio->irq = irq;
443 nand_chip->waitfunc = tmio_nand_wait;
444
445 /* Scan to find existence of the device */
446 if (nand_scan(mtd, 1)) {
447 retval = -ENODEV;
448 goto err_scan;
449 }
450 /* Register the partitions */
451#ifdef CONFIG_MTD_PARTITIONS
452#ifdef CONFIG_MTD_CMDLINE_PARTS
453 nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
454#endif
455 if (nbparts <= 0 && data) {
456 parts = data->partition;
457 nbparts = data->num_partitions;
458 }
459
460 if (nbparts)
461 retval = add_mtd_partitions(mtd, parts, nbparts);
462 else
463#endif
464 retval = add_mtd_device(mtd);
465
466 if (!retval)
467 return retval;
468
469 nand_release(mtd);
470
471err_scan:
472 if (tmio->irq)
473 free_irq(tmio->irq, tmio);
474err_irq:
475 tmio_hw_stop(dev, tmio);
476err_hwinit:
477 iounmap(tmio->fcr);
478err_iomap_fcr:
479 iounmap(tmio->ccr);
480err_iomap_ccr:
481 kfree(tmio);
482err_kzalloc:
483 return retval;
484}
485
486static int tmio_remove(struct platform_device *dev)
487{
488 struct tmio_nand *tmio = platform_get_drvdata(dev);
489
490 nand_release(&tmio->mtd);
491 if (tmio->irq)
492 free_irq(tmio->irq, tmio);
493 tmio_hw_stop(dev, tmio);
494 iounmap(tmio->fcr);
495 iounmap(tmio->ccr);
496 kfree(tmio);
497 return 0;
498}
499
500#ifdef CONFIG_PM
501static int tmio_suspend(struct platform_device *dev, pm_message_t state)
502{
503 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
504
505 if (cell->suspend)
506 cell->suspend(dev);
507
508 tmio_hw_stop(dev, platform_get_drvdata(dev));
509 return 0;
510}
511
512static int tmio_resume(struct platform_device *dev)
513{
514 struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
515
516 /* FIXME - is this required or merely another attack of the broken
517 * SHARP platform? Looks suspicious.
518 */
519 tmio_hw_init(dev, platform_get_drvdata(dev));
520
521 if (cell->resume)
522 cell->resume(dev);
523
524 return 0;
525}
526#else
527#define tmio_suspend NULL
528#define tmio_resume NULL
529#endif
530
531static struct platform_driver tmio_driver = {
532 .driver.name = "tmio-nand",
533 .driver.owner = THIS_MODULE,
534 .probe = tmio_probe,
535 .remove = tmio_remove,
536 .suspend = tmio_suspend,
537 .resume = tmio_resume,
538};
539
540static int __init tmio_init(void)
541{
542 return platform_driver_register(&tmio_driver);
543}
544
545static void __exit tmio_exit(void)
546{
547 platform_driver_unregister(&tmio_driver);
548}
549
550module_init(tmio_init);
551module_exit(tmio_exit);
552
553MODULE_LICENSE("GPL v2");
554MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
555MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
556MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 35264c244cfd..82d7be1655d3 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -47,7 +47,7 @@ MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
47MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
48MODULE_VERSION(DRV_VERSION); 48MODULE_VERSION(DRV_VERSION);
49 49
50static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); 50static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
51 51
52static const u16 52static const u16
53atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = 53atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
@@ -1037,7 +1037,7 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
1037 return; 1037 return;
1038} 1038}
1039 1039
1040static inline void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) 1040static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1041{ 1041{
1042 u32 value; 1042 u32 value;
1043 struct atl1e_hw *hw = &adapter->hw; 1043 struct atl1e_hw *hw = &adapter->hw;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4bf4f7b205f2..b468f904c7f8 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -40,20 +40,20 @@
40#define DP(__mask, __fmt, __args...) do { \ 40#define DP(__mask, __fmt, __args...) do { \
41 if (bp->msglevel & (__mask)) \ 41 if (bp->msglevel & (__mask)) \
42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 42 printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
43 bp->dev?(bp->dev->name):"?", ##__args); \ 43 bp->dev ? (bp->dev->name) : "?", ##__args); \
44 } while (0) 44 } while (0)
45 45
46/* errors debug print */ 46/* errors debug print */
47#define BNX2X_DBG_ERR(__fmt, __args...) do { \ 47#define BNX2X_DBG_ERR(__fmt, __args...) do { \
48 if (bp->msglevel & NETIF_MSG_PROBE) \ 48 if (bp->msglevel & NETIF_MSG_PROBE) \
49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 49 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
50 bp->dev?(bp->dev->name):"?", ##__args); \ 50 bp->dev ? (bp->dev->name) : "?", ##__args); \
51 } while (0) 51 } while (0)
52 52
53/* for errors (never masked) */ 53/* for errors (never masked) */
54#define BNX2X_ERR(__fmt, __args...) do { \ 54#define BNX2X_ERR(__fmt, __args...) do { \
55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \ 55 printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
56 bp->dev?(bp->dev->name):"?", ##__args); \ 56 bp->dev ? (bp->dev->name) : "?", ##__args); \
57 } while (0) 57 } while (0)
58 58
59/* before we have a dev->name use dev_info() */ 59/* before we have a dev->name use dev_info() */
@@ -120,16 +120,8 @@
120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field)) 120#define SHMEM_RD(bp, field) REG_RD(bp, SHMEM_ADDR(bp, field))
121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val) 121#define SHMEM_WR(bp, field, val) REG_WR(bp, SHMEM_ADDR(bp, field), val)
122 122
123#define NIG_WR(reg, val) REG_WR(bp, reg, val) 123#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
124#define EMAC_WR(reg, val) REG_WR(bp, emac_base + reg, val) 124#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
125#define BMAC_WR(reg, val) REG_WR(bp, GRCBASE_NIG + bmac_addr + reg, val)
126
127
128#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
129
130#define for_each_nondefault_queue(bp, var) \
131 for (var = 1; var < bp->num_queues; var++)
132#define is_multi(bp) (bp->num_queues > 1)
133 125
134 126
135/* fast path */ 127/* fast path */
@@ -163,7 +155,7 @@ struct sw_rx_page {
163#define NUM_RX_SGE_PAGES 2 155#define NUM_RX_SGE_PAGES 2
164#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge)) 156#define RX_SGE_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
165#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2) 157#define MAX_RX_SGE_CNT (RX_SGE_CNT - 2)
166/* RX_SGE_CNT is promissed to be a power of 2 */ 158/* RX_SGE_CNT is promised to be a power of 2 */
167#define RX_SGE_MASK (RX_SGE_CNT - 1) 159#define RX_SGE_MASK (RX_SGE_CNT - 1)
168#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES) 160#define NUM_RX_SGE (RX_SGE_CNT * NUM_RX_SGE_PAGES)
169#define MAX_RX_SGE (NUM_RX_SGE - 1) 161#define MAX_RX_SGE (NUM_RX_SGE - 1)
@@ -258,8 +250,7 @@ struct bnx2x_fastpath {
258 250
259 unsigned long tx_pkt, 251 unsigned long tx_pkt,
260 rx_pkt, 252 rx_pkt,
261 rx_calls, 253 rx_calls;
262 rx_alloc_failed;
263 /* TPA related */ 254 /* TPA related */
264 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H]; 255 struct sw_rx_bd tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
265 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H]; 256 u8 tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
@@ -275,6 +266,15 @@ struct bnx2x_fastpath {
275 266
276#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 267#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
277 268
269#define BNX2X_HAS_TX_WORK(fp) \
270 ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
271 (fp->tx_pkt_prod != fp->tx_pkt_cons))
272
273#define BNX2X_HAS_RX_WORK(fp) \
274 (fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
275
276#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
277
278 278
279/* MC hsi */ 279/* MC hsi */
280#define MAX_FETCH_BD 13 /* HW max BDs per packet */ 280#define MAX_FETCH_BD 13 /* HW max BDs per packet */
@@ -317,7 +317,7 @@ struct bnx2x_fastpath {
317#define RCQ_BD(x) ((x) & MAX_RCQ_BD) 317#define RCQ_BD(x) ((x) & MAX_RCQ_BD)
318 318
319 319
320/* This is needed for determening of last_max */ 320/* This is needed for determining of last_max */
321#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b)) 321#define SUB_S16(a, b) (s16)((s16)(a) - (s16)(b))
322 322
323#define __SGE_MASK_SET_BIT(el, bit) \ 323#define __SGE_MASK_SET_BIT(el, bit) \
@@ -386,20 +386,28 @@ struct bnx2x_fastpath {
386#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \ 386#define TPA_TYPE(cqe_fp_flags) ((cqe_fp_flags) & \
387 (TPA_TYPE_START | TPA_TYPE_END)) 387 (TPA_TYPE_START | TPA_TYPE_END))
388 388
389#define BNX2X_RX_SUM_OK(cqe) \ 389#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
390 (!(cqe->fast_path_cqe.status_flags & \ 390
391 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | \ 391#define BNX2X_IP_CSUM_ERR(cqe) \
392 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))) 392 (!((cqe)->fast_path_cqe.status_flags & \
393 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
394 ((cqe)->fast_path_cqe.type_error_flags & \
395 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
396
397#define BNX2X_L4_CSUM_ERR(cqe) \
398 (!((cqe)->fast_path_cqe.status_flags & \
399 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
400 ((cqe)->fast_path_cqe.type_error_flags & \
401 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
402
403#define BNX2X_RX_CSUM_OK(cqe) \
404 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
393 405
394#define BNX2X_RX_SUM_FIX(cqe) \ 406#define BNX2X_RX_SUM_FIX(cqe) \
395 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \ 407 ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & \
396 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \ 408 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) == \
397 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT)) 409 (1 << PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
398 410
399#define ETH_RX_ERROR_FALGS (ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG | \
400 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | \
401 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
402
403 411
404#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES) 412#define FP_USB_FUNC_OFF (2 + 2*HC_USTORM_SB_NUM_INDICES)
405#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES) 413#define FP_CSB_FUNC_OFF (2 + 2*HC_CSTORM_SB_NUM_INDICES)
@@ -647,6 +655,8 @@ struct bnx2x_eth_stats {
647 655
648 u32 brb_drop_hi; 656 u32 brb_drop_hi;
649 u32 brb_drop_lo; 657 u32 brb_drop_lo;
658 u32 brb_truncate_hi;
659 u32 brb_truncate_lo;
650 660
651 u32 jabber_packets_received; 661 u32 jabber_packets_received;
652 662
@@ -663,6 +673,9 @@ struct bnx2x_eth_stats {
663 u32 mac_discard; 673 u32 mac_discard;
664 674
665 u32 driver_xoff; 675 u32 driver_xoff;
676 u32 rx_err_discard_pkt;
677 u32 rx_skb_alloc_failed;
678 u32 hw_csum_err;
666}; 679};
667 680
668#define STATS_OFFSET32(stat_name) \ 681#define STATS_OFFSET32(stat_name) \
@@ -753,7 +766,6 @@ struct bnx2x {
753 u16 def_att_idx; 766 u16 def_att_idx;
754 u32 attn_state; 767 u32 attn_state;
755 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS]; 768 struct attn_route attn_group[MAX_DYNAMIC_ATTN_GRPS];
756 u32 aeu_mask;
757 u32 nig_mask; 769 u32 nig_mask;
758 770
759 /* slow path ring */ 771 /* slow path ring */
@@ -772,7 +784,7 @@ struct bnx2x {
772 u8 stats_pending; 784 u8 stats_pending;
773 u8 set_mac_pending; 785 u8 set_mac_pending;
774 786
775 /* End of fileds used in the performance code paths */ 787 /* End of fields used in the performance code paths */
776 788
777 int panic; 789 int panic;
778 int msglevel; 790 int msglevel;
@@ -794,9 +806,6 @@ struct bnx2x {
794#define BP_FUNC(bp) (bp->func) 806#define BP_FUNC(bp) (bp->func)
795#define BP_E1HVN(bp) (bp->func >> 1) 807#define BP_E1HVN(bp) (bp->func >> 1)
796#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 808#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
797/* assorted E1HVN */
798#define IS_E1HMF(bp) (bp->e1hmf != 0)
799#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
800 809
801 int pm_cap; 810 int pm_cap;
802 int pcie_cap; 811 int pcie_cap;
@@ -821,6 +830,7 @@ struct bnx2x {
821 u32 mf_config; 830 u32 mf_config;
822 u16 e1hov; 831 u16 e1hov;
823 u8 e1hmf; 832 u8 e1hmf;
833#define IS_E1HMF(bp) (bp->e1hmf != 0)
824 834
825 u8 wol; 835 u8 wol;
826 836
@@ -836,7 +846,6 @@ struct bnx2x {
836 u16 rx_ticks_int; 846 u16 rx_ticks_int;
837 u16 rx_ticks; 847 u16 rx_ticks;
838 848
839 u32 stats_ticks;
840 u32 lin_cnt; 849 u32 lin_cnt;
841 850
842 int state; 851 int state;
@@ -852,6 +861,7 @@ struct bnx2x {
852#define BNX2X_STATE_ERROR 0xf000 861#define BNX2X_STATE_ERROR 0xf000
853 862
854 int num_queues; 863 int num_queues;
864#define BP_MAX_QUEUES(bp) (IS_E1HMF(bp) ? 4 : 16)
855 865
856 u32 rx_mode; 866 u32 rx_mode;
857#define BNX2X_RX_MODE_NONE 0 867#define BNX2X_RX_MODE_NONE 0
@@ -902,10 +912,17 @@ struct bnx2x {
902}; 912};
903 913
904 914
915#define for_each_queue(bp, var) for (var = 0; var < bp->num_queues; var++)
916
917#define for_each_nondefault_queue(bp, var) \
918 for (var = 1; var < bp->num_queues; var++)
919#define is_multi(bp) (bp->num_queues > 1)
920
921
905void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 922void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
906void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr, 923void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
907 u32 len32); 924 u32 len32);
908int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode); 925int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
909 926
910static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, 927static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
911 int wait) 928 int wait)
@@ -976,7 +993,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
976#define PCICFG_LINK_SPEED_SHIFT 16 993#define PCICFG_LINK_SPEED_SHIFT 16
977 994
978 995
979#define BNX2X_NUM_STATS 39 996#define BNX2X_NUM_STATS 42
980#define BNX2X_NUM_TESTS 8 997#define BNX2X_NUM_TESTS 8
981 998
982#define BNX2X_MAC_LOOPBACK 0 999#define BNX2X_MAC_LOOPBACK 0
@@ -1007,10 +1024,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1007/* resolution of the rate shaping timer - 100 usec */ 1024/* resolution of the rate shaping timer - 100 usec */
1008#define RS_PERIODIC_TIMEOUT_USEC 100 1025#define RS_PERIODIC_TIMEOUT_USEC 100
1009/* resolution of fairness algorithm in usecs - 1026/* resolution of fairness algorithm in usecs -
1010 coefficient for clauclating the actuall t fair */ 1027 coefficient for calculating the actual t fair */
1011#define T_FAIR_COEF 10000000 1028#define T_FAIR_COEF 10000000
1012/* number of bytes in single QM arbitration cycle - 1029/* number of bytes in single QM arbitration cycle -
1013 coeffiecnt for calculating the fairness timer */ 1030 coefficient for calculating the fairness timer */
1014#define QM_ARB_BYTES 40000 1031#define QM_ARB_BYTES 40000
1015#define FAIR_MEM 2 1032#define FAIR_MEM 2
1016 1033
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index e3da7f69d27b..192fa981b930 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -9,165 +9,171 @@
9 9
10 10
11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \ 11#define CSTORM_ASSERT_LIST_INDEX_OFFSET \
12 (IS_E1H_OFFSET? 0x7000 : 0x1000) 12 (IS_E1H_OFFSET ? 0x7000 : 0x1000)
13#define CSTORM_ASSERT_LIST_OFFSET(idx) \ 13#define CSTORM_ASSERT_LIST_OFFSET(idx) \
14 (IS_E1H_OFFSET? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 14 (IS_E1H_OFFSET ? (0x7020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 15#define CSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
16 (IS_E1H_OFFSET? (0x8522 + ((function>>1) * 0x40) + ((function&1) \ 16 (IS_E1H_OFFSET ? (0x8522 + ((function>>1) * 0x40) + \
17 * 0x100) + (index * 0x4)) : (0x1922 + (function * 0x40) + (index \ 17 ((function&1) * 0x100) + (index * 0x4)) : (0x1922 + (function * \
18 * 0x4))) 18 0x40) + (index * 0x4)))
19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 19#define CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
20 (IS_E1H_OFFSET? (0x8500 + ((function>>1) * 0x40) + ((function&1) \ 20 (IS_E1H_OFFSET ? (0x8500 + ((function>>1) * 0x40) + \
21 * 0x100)) : (0x1900 + (function * 0x40))) 21 ((function&1) * 0x100)) : (0x1900 + (function * 0x40)))
22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 22#define CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
23 (IS_E1H_OFFSET? (0x8508 + ((function>>1) * 0x40) + ((function&1) \ 23 (IS_E1H_OFFSET ? (0x8508 + ((function>>1) * 0x40) + \
24 * 0x100)) : (0x1908 + (function * 0x40))) 24 ((function&1) * 0x100)) : (0x1908 + (function * 0x40)))
25#define CSTORM_FUNCTION_MODE_OFFSET \ 25#define CSTORM_FUNCTION_MODE_OFFSET \
26 (IS_E1H_OFFSET? 0x11e8 : 0xffffffff) 26 (IS_E1H_OFFSET ? 0x11e8 : 0xffffffff)
27#define CSTORM_HC_BTR_OFFSET(port) \ 27#define CSTORM_HC_BTR_OFFSET(port) \
28 (IS_E1H_OFFSET? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0))) 28 (IS_E1H_OFFSET ? (0x8704 + (port * 0xf0)) : (0x1984 + (port * 0xc0)))
29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ 29#define CSTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
30 (IS_E1H_OFFSET? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \ 30 (IS_E1H_OFFSET ? (0x801a + (port * 0x280) + (cpu_id * 0x28) + \
31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ 31 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
32 (index * 0x4))) 32 (index * 0x4)))
33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ 33#define CSTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
34 (IS_E1H_OFFSET? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \ 34 (IS_E1H_OFFSET ? (0x8018 + (port * 0x280) + (cpu_id * 0x28) + \
35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ 35 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
36 (index * 0x4))) 36 (index * 0x4)))
37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ 37#define CSTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
38 (IS_E1H_OFFSET? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \ 38 (IS_E1H_OFFSET ? (0x8000 + (port * 0x280) + (cpu_id * 0x28)) : \
39 (0x1400 + (port * 0x280) + (cpu_id * 0x28))) 39 (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ 40#define CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
41 (IS_E1H_OFFSET? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \ 41 (IS_E1H_OFFSET ? (0x8008 + (port * 0x280) + (cpu_id * 0x28)) : \
42 (0x1408 + (port * 0x280) + (cpu_id * 0x28))) 42 (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
43#define CSTORM_STATS_FLAGS_OFFSET(function) \ 43#define CSTORM_STATS_FLAGS_OFFSET(function) \
44 (IS_E1H_OFFSET? (0x1108 + (function * 0x8)) : (0x5108 + \ 44 (IS_E1H_OFFSET ? (0x1108 + (function * 0x8)) : (0x5108 + \
45 (function * 0x8))) 45 (function * 0x8)))
46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \ 46#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(function) \
47 (IS_E1H_OFFSET? (0x31c0 + (function * 0x20)) : 0xffffffff) 47 (IS_E1H_OFFSET ? (0x31c0 + (function * 0x20)) : 0xffffffff)
48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \ 48#define TSTORM_ASSERT_LIST_INDEX_OFFSET \
49 (IS_E1H_OFFSET? 0xa000 : 0x1000) 49 (IS_E1H_OFFSET ? 0xa000 : 0x1000)
50#define TSTORM_ASSERT_LIST_OFFSET(idx) \ 50#define TSTORM_ASSERT_LIST_OFFSET(idx) \
51 (IS_E1H_OFFSET? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 51 (IS_E1H_OFFSET ? (0xa020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \ 52#define TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) \
53 (IS_E1H_OFFSET? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) : \ 53 (IS_E1H_OFFSET ? (0x3358 + (port * 0x3e8) + (client_id * 0x28)) \
54 (0x9c8 + (port * 0x2f8) + (client_id * 0x28))) 54 : (0x9c8 + (port * 0x2f8) + (client_id * 0x28)))
55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 55#define TSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
56 (IS_E1H_OFFSET? (0xb01a + ((function>>1) * 0x28) + ((function&1) \ 56 (IS_E1H_OFFSET ? (0xb01a + ((function>>1) * 0x28) + \
57 * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ 57 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
58 0x4))) 58 0x28) + (index * 0x4)))
59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 59#define TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
60 (IS_E1H_OFFSET? (0xb000 + ((function>>1) * 0x28) + ((function&1) \ 60 (IS_E1H_OFFSET ? (0xb000 + ((function>>1) * 0x28) + \
61 * 0xa0)) : (0x1400 + (function * 0x28))) 61 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 62#define TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
63 (IS_E1H_OFFSET? (0xb008 + ((function>>1) * 0x28) + ((function&1) \ 63 (IS_E1H_OFFSET ? (0xb008 + ((function>>1) * 0x28) + \
64 * 0xa0)) : (0x1408 + (function * 0x28))) 64 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 65#define TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
66 (IS_E1H_OFFSET? (0x2b80 + (function * 0x8)) : (0x4b68 + \ 66 (IS_E1H_OFFSET ? (0x2b80 + (function * 0x8)) : (0x4b68 + \
67 (function * 0x8))) 67 (function * 0x8)))
68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \ 68#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(function) \
69 (IS_E1H_OFFSET? (0x3000 + (function * 0x38)) : (0x1500 + \ 69 (IS_E1H_OFFSET ? (0x3000 + (function * 0x38)) : (0x1500 + \
70 (function * 0x38))) 70 (function * 0x38)))
71#define TSTORM_FUNCTION_MODE_OFFSET \ 71#define TSTORM_FUNCTION_MODE_OFFSET \
72 (IS_E1H_OFFSET? 0x1ad0 : 0xffffffff) 72 (IS_E1H_OFFSET ? 0x1ad0 : 0xffffffff)
73#define TSTORM_HC_BTR_OFFSET(port) \ 73#define TSTORM_HC_BTR_OFFSET(port) \
74 (IS_E1H_OFFSET? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 74 (IS_E1H_OFFSET ? (0xb144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \ 75#define TSTORM_INDIRECTION_TABLE_OFFSET(function) \
76 (IS_E1H_OFFSET? (0x12c8 + (function * 0x80)) : (0x22c8 + \ 76 (IS_E1H_OFFSET ? (0x12c8 + (function * 0x80)) : (0x22c8 + \
77 (function * 0x80))) 77 (function * 0x80)))
78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80 78#define TSTORM_INDIRECTION_TABLE_SIZE 0x80
79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \ 79#define TSTORM_MAC_FILTER_CONFIG_OFFSET(function) \
80 (IS_E1H_OFFSET? (0x3008 + (function * 0x38)) : (0x1508 + \ 80 (IS_E1H_OFFSET ? (0x3008 + (function * 0x38)) : (0x1508 + \
81 (function * 0x38))) 81 (function * 0x38)))
82#define TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
83 (IS_E1H_OFFSET ? (0x2010 + (port * 0x5b0) + (stats_counter_id * \
84 0x50)) : (0x4000 + (port * 0x3f0) + (stats_counter_id * 0x38)))
82#define TSTORM_RX_PRODS_OFFSET(port, client_id) \ 85#define TSTORM_RX_PRODS_OFFSET(port, client_id) \
83 (IS_E1H_OFFSET? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) : \ 86 (IS_E1H_OFFSET ? (0x3350 + (port * 0x3e8) + (client_id * 0x28)) \
84 (0x9c0 + (port * 0x2f8) + (client_id * 0x28))) 87 : (0x9c0 + (port * 0x2f8) + (client_id * 0x28)))
85#define TSTORM_STATS_FLAGS_OFFSET(function) \ 88#define TSTORM_STATS_FLAGS_OFFSET(function) \
86 (IS_E1H_OFFSET? (0x2c00 + (function * 0x8)) : (0x4b88 + \ 89 (IS_E1H_OFFSET ? (0x2c00 + (function * 0x8)) : (0x4b88 + \
87 (function * 0x8))) 90 (function * 0x8)))
88#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET? 0x3b30 : 0x1c20) 91#define TSTORM_TPA_EXIST_OFFSET (IS_E1H_OFFSET ? 0x3b30 : 0x1c20)
89#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET? 0xa040 : 0x2c10) 92#define USTORM_AGG_DATA_OFFSET (IS_E1H_OFFSET ? 0xa040 : 0x2c10)
90#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET? 0x2440 : 0x1200) 93#define USTORM_AGG_DATA_SIZE (IS_E1H_OFFSET ? 0x2440 : 0x1200)
91#define USTORM_ASSERT_LIST_INDEX_OFFSET \ 94#define USTORM_ASSERT_LIST_INDEX_OFFSET \
92 (IS_E1H_OFFSET? 0x8000 : 0x1000) 95 (IS_E1H_OFFSET ? 0x8000 : 0x1000)
93#define USTORM_ASSERT_LIST_OFFSET(idx) \ 96#define USTORM_ASSERT_LIST_OFFSET(idx) \
94 (IS_E1H_OFFSET? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 97 (IS_E1H_OFFSET ? (0x8020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
95#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \ 98#define USTORM_CQE_PAGE_BASE_OFFSET(port, clientId) \
96 (IS_E1H_OFFSET? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \ 99 (IS_E1H_OFFSET ? (0x3298 + (port * 0x258) + (clientId * 0x18)) : \
97 (0x5450 + (port * 0x1c8) + (clientId * 0x18))) 100 (0x5450 + (port * 0x1c8) + (clientId * 0x18)))
98#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 101#define USTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
99 (IS_E1H_OFFSET? (0x951a + ((function>>1) * 0x28) + ((function&1) \ 102 (IS_E1H_OFFSET ? (0x951a + ((function>>1) * 0x28) + \
100 * 0xa0) + (index * 0x4)) : (0x191a + (function * 0x28) + (index * \ 103 ((function&1) * 0xa0) + (index * 0x4)) : (0x191a + (function * \
101 0x4))) 104 0x28) + (index * 0x4)))
102#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 105#define USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
103 (IS_E1H_OFFSET? (0x9500 + ((function>>1) * 0x28) + ((function&1) \ 106 (IS_E1H_OFFSET ? (0x9500 + ((function>>1) * 0x28) + \
104 * 0xa0)) : (0x1900 + (function * 0x28))) 107 ((function&1) * 0xa0)) : (0x1900 + (function * 0x28)))
105#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 108#define USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
106 (IS_E1H_OFFSET? (0x9508 + ((function>>1) * 0x28) + ((function&1) \ 109 (IS_E1H_OFFSET ? (0x9508 + ((function>>1) * 0x28) + \
107 * 0xa0)) : (0x1908 + (function * 0x28))) 110 ((function&1) * 0xa0)) : (0x1908 + (function * 0x28)))
108#define USTORM_FUNCTION_MODE_OFFSET \ 111#define USTORM_FUNCTION_MODE_OFFSET \
109 (IS_E1H_OFFSET? 0x2448 : 0xffffffff) 112 (IS_E1H_OFFSET ? 0x2448 : 0xffffffff)
110#define USTORM_HC_BTR_OFFSET(port) \ 113#define USTORM_HC_BTR_OFFSET(port) \
111 (IS_E1H_OFFSET? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8))) 114 (IS_E1H_OFFSET ? (0x9644 + (port * 0xd0)) : (0x1954 + (port * 0xb8)))
112#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \ 115#define USTORM_MAX_AGG_SIZE_OFFSET(port, clientId) \
113 (IS_E1H_OFFSET? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \ 116 (IS_E1H_OFFSET ? (0x3290 + (port * 0x258) + (clientId * 0x18)) : \
114 (0x5448 + (port * 0x1c8) + (clientId * 0x18))) 117 (0x5448 + (port * 0x1c8) + (clientId * 0x18)))
115#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \ 118#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(function) \
116 (IS_E1H_OFFSET? (0x2408 + (function * 0x8)) : (0x5408 + \ 119 (IS_E1H_OFFSET ? (0x2408 + (function * 0x8)) : (0x5408 + \
117 (function * 0x8))) 120 (function * 0x8)))
118#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \ 121#define USTORM_SB_HC_DISABLE_OFFSET(port, cpu_id, index) \
119 (IS_E1H_OFFSET? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \ 122 (IS_E1H_OFFSET ? (0x901a + (port * 0x280) + (cpu_id * 0x28) + \
120 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \ 123 (index * 0x4)) : (0x141a + (port * 0x280) + (cpu_id * 0x28) + \
121 (index * 0x4))) 124 (index * 0x4)))
122#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \ 125#define USTORM_SB_HC_TIMEOUT_OFFSET(port, cpu_id, index) \
123 (IS_E1H_OFFSET? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \ 126 (IS_E1H_OFFSET ? (0x9018 + (port * 0x280) + (cpu_id * 0x28) + \
124 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \ 127 (index * 0x4)) : (0x1418 + (port * 0x280) + (cpu_id * 0x28) + \
125 (index * 0x4))) 128 (index * 0x4)))
126#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \ 129#define USTORM_SB_HOST_SB_ADDR_OFFSET(port, cpu_id) \
127 (IS_E1H_OFFSET? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \ 130 (IS_E1H_OFFSET ? (0x9000 + (port * 0x280) + (cpu_id * 0x28)) : \
128 (0x1400 + (port * 0x280) + (cpu_id * 0x28))) 131 (0x1400 + (port * 0x280) + (cpu_id * 0x28)))
129#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \ 132#define USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, cpu_id) \
130 (IS_E1H_OFFSET? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \ 133 (IS_E1H_OFFSET ? (0x9008 + (port * 0x280) + (cpu_id * 0x28)) : \
131 (0x1408 + (port * 0x280) + (cpu_id * 0x28))) 134 (0x1408 + (port * 0x280) + (cpu_id * 0x28)))
132#define XSTORM_ASSERT_LIST_INDEX_OFFSET \ 135#define XSTORM_ASSERT_LIST_INDEX_OFFSET \
133 (IS_E1H_OFFSET? 0x9000 : 0x1000) 136 (IS_E1H_OFFSET ? 0x9000 : 0x1000)
134#define XSTORM_ASSERT_LIST_OFFSET(idx) \ 137#define XSTORM_ASSERT_LIST_OFFSET(idx) \
135 (IS_E1H_OFFSET? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10))) 138 (IS_E1H_OFFSET ? (0x9020 + (idx * 0x10)) : (0x1020 + (idx * 0x10)))
136#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \ 139#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) \
137 (IS_E1H_OFFSET? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40))) 140 (IS_E1H_OFFSET ? (0x24a8 + (port * 0x40)) : (0x3ba0 + (port * 0x40)))
138#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \ 141#define XSTORM_DEF_SB_HC_DISABLE_OFFSET(function, index) \
139 (IS_E1H_OFFSET? (0xa01a + ((function>>1) * 0x28) + ((function&1) \ 142 (IS_E1H_OFFSET ? (0xa01a + ((function>>1) * 0x28) + \
140 * 0xa0) + (index * 0x4)) : (0x141a + (function * 0x28) + (index * \ 143 ((function&1) * 0xa0) + (index * 0x4)) : (0x141a + (function * \
141 0x4))) 144 0x28) + (index * 0x4)))
142#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \ 145#define XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(function) \
143 (IS_E1H_OFFSET? (0xa000 + ((function>>1) * 0x28) + ((function&1) \ 146 (IS_E1H_OFFSET ? (0xa000 + ((function>>1) * 0x28) + \
144 * 0xa0)) : (0x1400 + (function * 0x28))) 147 ((function&1) * 0xa0)) : (0x1400 + (function * 0x28)))
145#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \ 148#define XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(function) \
146 (IS_E1H_OFFSET? (0xa008 + ((function>>1) * 0x28) + ((function&1) \ 149 (IS_E1H_OFFSET ? (0xa008 + ((function>>1) * 0x28) + \
147 * 0xa0)) : (0x1408 + (function * 0x28))) 150 ((function&1) * 0xa0)) : (0x1408 + (function * 0x28)))
148#define XSTORM_E1HOV_OFFSET(function) \ 151#define XSTORM_E1HOV_OFFSET(function) \
149 (IS_E1H_OFFSET? (0x2ab8 + (function * 0x2)) : 0xffffffff) 152 (IS_E1H_OFFSET ? (0x2ab8 + (function * 0x2)) : 0xffffffff)
150#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \ 153#define XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(function) \
151 (IS_E1H_OFFSET? (0x2418 + (function * 0x8)) : (0x3b70 + \ 154 (IS_E1H_OFFSET ? (0x2418 + (function * 0x8)) : (0x3b70 + \
152 (function * 0x8))) 155 (function * 0x8)))
153#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \ 156#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(function) \
154 (IS_E1H_OFFSET? (0x2568 + (function * 0x70)) : (0x3c60 + \ 157 (IS_E1H_OFFSET ? (0x2568 + (function * 0x70)) : (0x3c60 + \
155 (function * 0x70))) 158 (function * 0x70)))
156#define XSTORM_FUNCTION_MODE_OFFSET \ 159#define XSTORM_FUNCTION_MODE_OFFSET \
157 (IS_E1H_OFFSET? 0x2ac8 : 0xffffffff) 160 (IS_E1H_OFFSET ? 0x2ac8 : 0xffffffff)
158#define XSTORM_HC_BTR_OFFSET(port) \ 161#define XSTORM_HC_BTR_OFFSET(port) \
159 (IS_E1H_OFFSET? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18))) 162 (IS_E1H_OFFSET ? (0xa144 + (port * 0x30)) : (0x1454 + (port * 0x18)))
163#define XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stats_counter_id) \
164 (IS_E1H_OFFSET ? (0xc000 + (port * 0x3f0) + (stats_counter_id * \
165 0x38)) : (0x3378 + (port * 0x3f0) + (stats_counter_id * 0x38)))
160#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \ 166#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(function) \
161 (IS_E1H_OFFSET? (0x2528 + (function * 0x70)) : (0x3c20 + \ 167 (IS_E1H_OFFSET ? (0x2528 + (function * 0x70)) : (0x3c20 + \
162 (function * 0x70))) 168 (function * 0x70)))
163#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \ 169#define XSTORM_SPQ_PAGE_BASE_OFFSET(function) \
164 (IS_E1H_OFFSET? (0x2000 + (function * 0x10)) : (0x3328 + \ 170 (IS_E1H_OFFSET ? (0x2000 + (function * 0x10)) : (0x3328 + \
165 (function * 0x10))) 171 (function * 0x10)))
166#define XSTORM_SPQ_PROD_OFFSET(function) \ 172#define XSTORM_SPQ_PROD_OFFSET(function) \
167 (IS_E1H_OFFSET? (0x2008 + (function * 0x10)) : (0x3330 + \ 173 (IS_E1H_OFFSET ? (0x2008 + (function * 0x10)) : (0x3330 + \
168 (function * 0x10))) 174 (function * 0x10)))
169#define XSTORM_STATS_FLAGS_OFFSET(function) \ 175#define XSTORM_STATS_FLAGS_OFFSET(function) \
170 (IS_E1H_OFFSET? (0x23d8 + (function * 0x8)) : (0x3b60 + \ 176 (IS_E1H_OFFSET ? (0x23d8 + (function * 0x8)) : (0x3b60 + \
171 (function * 0x8))) 177 (function * 0x8)))
172#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 178#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
173 179
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index d3e8198d7dba..efd764427fa1 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1268,7 +1268,7 @@ struct doorbell {
1268 1268
1269 1269
1270/* 1270/*
1271 * IGU driver acknowlegement register 1271 * IGU driver acknowledgement register
1272 */ 1272 */
1273struct igu_ack_register { 1273struct igu_ack_register {
1274#if defined(__BIG_ENDIAN) 1274#if defined(__BIG_ENDIAN)
@@ -1882,7 +1882,7 @@ struct timers_block_context {
1882}; 1882};
1883 1883
1884/* 1884/*
1885 * structure for easy accessability to assembler 1885 * structure for easy accessibility to assembler
1886 */ 1886 */
1887struct eth_tx_bd_flags { 1887struct eth_tx_bd_flags {
1888 u8 as_bitfield; 1888 u8 as_bitfield;
@@ -2044,7 +2044,7 @@ struct eth_context {
2044 2044
2045 2045
2046/* 2046/*
2047 * ethernet doorbell 2047 * Ethernet doorbell
2048 */ 2048 */
2049struct eth_tx_doorbell { 2049struct eth_tx_doorbell {
2050#if defined(__BIG_ENDIAN) 2050#if defined(__BIG_ENDIAN)
@@ -2256,7 +2256,7 @@ struct ramrod_data {
2256}; 2256};
2257 2257
2258/* 2258/*
2259 * union for ramrod data for ethernet protocol (CQE) (force size of 16 bits) 2259 * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
2260 */ 2260 */
2261union eth_ramrod_data { 2261union eth_ramrod_data {
2262 struct ramrod_data general; 2262 struct ramrod_data general;
@@ -2330,7 +2330,7 @@ struct spe_hdr {
2330}; 2330};
2331 2331
2332/* 2332/*
2333 * ethernet slow path element 2333 * Ethernet slow path element
2334 */ 2334 */
2335union eth_specific_data { 2335union eth_specific_data {
2336 u8 protocol_data[8]; 2336 u8 protocol_data[8];
@@ -2343,7 +2343,7 @@ union eth_specific_data {
2343}; 2343};
2344 2344
2345/* 2345/*
2346 * ethernet slow path element 2346 * Ethernet slow path element
2347 */ 2347 */
2348struct eth_spe { 2348struct eth_spe {
2349 struct spe_hdr hdr; 2349 struct spe_hdr hdr;
@@ -2615,7 +2615,7 @@ struct tstorm_eth_rx_producers {
2615 2615
2616 2616
2617/* 2617/*
2618 * common flag to indicate existance of TPA. 2618 * common flag to indicate existence of TPA.
2619 */ 2619 */
2620struct tstorm_eth_tpa_exist { 2620struct tstorm_eth_tpa_exist {
2621#if defined(__BIG_ENDIAN) 2621#if defined(__BIG_ENDIAN)
@@ -2765,7 +2765,7 @@ struct tstorm_common_stats {
2765}; 2765};
2766 2766
2767/* 2767/*
2768 * Eth statistics query sturcture for the eth_stats_quesry ramrod 2768 * Eth statistics query structure for the eth_stats_query ramrod
2769 */ 2769 */
2770struct eth_stats_query { 2770struct eth_stats_query {
2771 struct xstorm_common_stats xstorm_common; 2771 struct xstorm_common_stats xstorm_common;
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 4c7750789b62..130927cfc75b 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -72,26 +72,26 @@
72 72
73 73
74struct raw_op { 74struct raw_op {
75 u32 op :8; 75 u32 op:8;
76 u32 offset :24; 76 u32 offset:24;
77 u32 raw_data; 77 u32 raw_data;
78}; 78};
79 79
80struct op_read { 80struct op_read {
81 u32 op :8; 81 u32 op:8;
82 u32 offset :24; 82 u32 offset:24;
83 u32 pad; 83 u32 pad;
84}; 84};
85 85
86struct op_write { 86struct op_write {
87 u32 op :8; 87 u32 op:8;
88 u32 offset :24; 88 u32 offset:24;
89 u32 val; 89 u32 val;
90}; 90};
91 91
92struct op_string_write { 92struct op_string_write {
93 u32 op :8; 93 u32 op:8;
94 u32 offset :24; 94 u32 offset:24;
95#ifdef __LITTLE_ENDIAN 95#ifdef __LITTLE_ENDIAN
96 u16 data_off; 96 u16 data_off;
97 u16 data_len; 97 u16 data_len;
@@ -102,8 +102,8 @@ struct op_string_write {
102}; 102};
103 103
104struct op_zero { 104struct op_zero {
105 u32 op :8; 105 u32 op:8;
106 u32 offset :24; 106 u32 offset:24;
107 u32 len; 107 u32 len;
108}; 108};
109 109
@@ -208,7 +208,7 @@ static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
208/********************************************************* 208/*********************************************************
209 There are different blobs for each PRAM section. 209 There are different blobs for each PRAM section.
210 In addition, each blob write operation is divided into a few operations 210 In addition, each blob write operation is divided into a few operations
211 in order to decrease the amount of phys. contigious buffer needed. 211 in order to decrease the amount of phys. contiguous buffer needed.
212 Thus, when we select a blob the address may be with some offset 212 Thus, when we select a blob the address may be with some offset
213 from the beginning of PRAM section. 213 from the beginning of PRAM section.
214 The same holds for the INT_TABLE sections. 214 The same holds for the INT_TABLE sections.
@@ -336,7 +336,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 op_start, u32 op_end)
336 len = op->str_wr.data_len; 336 len = op->str_wr.data_len;
337 data = data_base + op->str_wr.data_off; 337 data = data_base + op->str_wr.data_off;
338 338
339 /* carefull! it must be in order */ 339 /* careful! it must be in order */
340 if (unlikely(op_type > OP_WB)) { 340 if (unlikely(op_type > OP_WB)) {
341 341
342 /* If E1 only */ 342 /* If E1 only */
@@ -740,7 +740,7 @@ static u8 calc_crc8(u32 data, u8 crc)
740 return crc_res; 740 return crc_res;
741} 741}
742 742
743/* regiesers addresses are not in order 743/* registers addresses are not in order
744 so these arrays help simplify the code */ 744 so these arrays help simplify the code */
745static const int cm_start[E1H_FUNC_MAX][9] = { 745static const int cm_start[E1H_FUNC_MAX][9] = {
746 {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START, 746 {MISC_FUNC0_START, TCM_FUNC0_START, UCM_FUNC0_START, CCM_FUNC0_START,
diff --git a/drivers/net/bnx2x_init_values.h b/drivers/net/bnx2x_init_values.h
index 63019055e4bb..9755bf6b08dd 100644
--- a/drivers/net/bnx2x_init_values.h
+++ b/drivers/net/bnx2x_init_values.h
@@ -901,31 +901,28 @@ static const struct raw_op init_ops[] = {
901 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4}, 901 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3760, 0x4},
902 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42}, 902 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1e20, 0x42},
903 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9}, 903 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3738, 0x9},
904 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x400}, 904 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2},
905 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293}, 905 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x3738 + 0x24, 0x10293},
906 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c00, 0x2}, 906 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x20278},
907 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42}, 907 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3180, 0x42},
908 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2c00 + 0x8, 0x20278}, 908 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
909 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400}, 909 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x400},
910 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b68, 0x2}, 910 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027a},
911 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2}, 911 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4000, 0x2},
912 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x4b68 + 0x8, 0x2027a},
913 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294}, 912 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x4000 + 0x8, 0x20294},
914 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b10, 0x2},
915 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2}, 913 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b68, 0x2},
916 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x2830, 0x2027c},
917 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296}, 914 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x6b68 + 0x8, 0x20296},
918 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2}, 915 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b10, 0x2},
919 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298}, 916 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x74c0, 0x20298},
920 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, 917 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
921 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027e}, 918 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c00, 0x10027c},
922 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a}, 919 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c00, 0x10029a},
923 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0}, 920 {OP_WR, USEM_REG_FAST_MEMORY + 0x10800, 0x0},
924 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028e}, 921 {OP_SW_E1, USEM_REG_FAST_MEMORY + 0x10c40, 0x10028c},
925 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa}, 922 {OP_SW_E1H, USEM_REG_FAST_MEMORY + 0x10c40, 0x1002aa},
926 {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000}, 923 {OP_ZP_E1, USEM_REG_INT_TABLE, 0xc20000},
927 {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000}, 924 {OP_ZP_E1H, USEM_REG_INT_TABLE, 0xc40000},
928 {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029e}, 925 {OP_WR_64_E1, USEM_REG_INT_TABLE + 0x368, 0x13029c},
929 {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba}, 926 {OP_WR_64_E1H, USEM_REG_INT_TABLE + 0x368, 0x1302ba},
930 {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000}, 927 {OP_ZP_E1, USEM_REG_PRAM, 0x311c0000},
931 {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000}, 928 {OP_ZP_E1H, USEM_REG_PRAM, 0x31070000},
@@ -933,11 +930,11 @@ static const struct raw_op init_ops[] = {
933 {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42}, 930 {OP_ZP_E1H, USEM_REG_PRAM + 0x8000, 0x330e0c42},
934 {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919}, 931 {OP_ZP_E1, USEM_REG_PRAM + 0x10000, 0x38561919},
935 {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906}, 932 {OP_ZP_E1H, USEM_REG_PRAM + 0x10000, 0x389b1906},
936 {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x500402a0}, 933 {OP_WR_64_E1, USEM_REG_PRAM + 0x17fe0, 0x5004029e},
937 {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d}, 934 {OP_ZP_E1H, USEM_REG_PRAM + 0x18000, 0x132272d},
938 {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc}, 935 {OP_WR_64_E1H, USEM_REG_PRAM + 0x18250, 0x4fb602bc},
939#define USEM_COMMON_END 790 936#define USEM_COMMON_END 787
940#define USEM_PORT0_START 790 937#define USEM_PORT0_START 787
941 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0}, 938 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1400, 0xa0},
942 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0}, 939 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9000, 0xa0},
943 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa}, 940 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1900, 0xa},
@@ -950,44 +947,27 @@ static const struct raw_op init_ops[] = {
950 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96}, 947 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3288, 0x96},
951 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72}, 948 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5440, 0x72},
952 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20}, 949 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5000, 0x20},
953 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3000, 0x20}, 950 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52},
954 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20}, 951 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5100, 0x20},
955 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3100, 0x20}, 952 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc},
956 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20}, 953 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5200, 0x20},
957 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3200, 0x20},
958 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20}, 954 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5300, 0x20},
959 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3300, 0x20},
960 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20}, 955 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5400, 0x20},
961 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3400, 0x20},
962 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20}, 956 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5500, 0x20},
963 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3500, 0x20},
964 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20}, 957 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5600, 0x20},
965 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3600, 0x20},
966 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20}, 958 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5700, 0x20},
967 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3700, 0x20},
968 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20}, 959 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5800, 0x20},
969 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3800, 0x20},
970 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20}, 960 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5900, 0x20},
971 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3900, 0x20},
972 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20}, 961 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a00, 0x20},
973 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a00, 0x20},
974 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20}, 962 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b00, 0x20},
975 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b00, 0x20},
976 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20}, 963 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c00, 0x20},
977 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c00, 0x20},
978 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20}, 964 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d00, 0x20},
979 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d00, 0x20},
980 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20}, 965 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e00, 0x20},
981 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e00, 0x20},
982 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20}, 966 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f00, 0x20},
983 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f00, 0x20},
984 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52}, 967 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6b78, 0x52},
985 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c10, 0x2},
986 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc}, 968 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e08, 0xc},
987 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4b78, 0x52}, 969#define USEM_PORT0_END 818
988 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e08, 0xc}, 970#define USEM_PORT1_START 818
989#define USEM_PORT0_END 838
990#define USEM_PORT1_START 838
991 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0}, 971 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1680, 0xa0},
992 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0}, 972 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x9280, 0xa0},
993 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa}, 973 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x1928, 0xa},
@@ -1000,76 +980,59 @@ static const struct raw_op init_ops[] = {
1000 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96}, 980 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x34e0, 0x96},
1001 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72}, 981 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x5608, 0x72},
1002 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20}, 982 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5080, 0x20},
1003 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3080, 0x20}, 983 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52},
1004 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20}, 984 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5180, 0x20},
1005 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3180, 0x20}, 985 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc},
1006 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20}, 986 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5280, 0x20},
1007 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3280, 0x20},
1008 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20}, 987 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5380, 0x20},
1009 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3380, 0x20},
1010 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20}, 988 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5480, 0x20},
1011 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3480, 0x20},
1012 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20}, 989 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5580, 0x20},
1013 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3580, 0x20},
1014 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20}, 990 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5680, 0x20},
1015 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3680, 0x20},
1016 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20}, 991 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5780, 0x20},
1017 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3780, 0x20},
1018 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20}, 992 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5880, 0x20},
1019 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3880, 0x20},
1020 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20}, 993 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5980, 0x20},
1021 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3980, 0x20},
1022 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20}, 994 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5a80, 0x20},
1023 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3a80, 0x20},
1024 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20}, 995 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5b80, 0x20},
1025 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3b80, 0x20},
1026 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20}, 996 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5c80, 0x20},
1027 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3c80, 0x20},
1028 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20}, 997 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5d80, 0x20},
1029 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3d80, 0x20},
1030 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20}, 998 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5e80, 0x20},
1031 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3e80, 0x20},
1032 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20}, 999 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x5f80, 0x20},
1033 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x3f80, 0x20},
1034 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52}, 1000 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6cc0, 0x52},
1035 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x2c20, 0x2},
1036 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc}, 1001 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x6e38, 0xc},
1037 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4cc0, 0x52}, 1002#define USEM_PORT1_END 849
1038 {OP_ZR_E1, USEM_REG_FAST_MEMORY + 0x4e38, 0xc}, 1003#define USEM_FUNC0_START 849
1039#define USEM_PORT1_END 886
1040#define USEM_FUNC0_START 886
1041 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4}, 1004 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3000, 0x4},
1042 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2}, 1005 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4010, 0x2},
1043#define USEM_FUNC0_END 888 1006#define USEM_FUNC0_END 851
1044#define USEM_FUNC1_START 888 1007#define USEM_FUNC1_START 851
1045 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4}, 1008 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3010, 0x4},
1046 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2}, 1009 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4020, 0x2},
1047#define USEM_FUNC1_END 890 1010#define USEM_FUNC1_END 853
1048#define USEM_FUNC2_START 890 1011#define USEM_FUNC2_START 853
1049 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4}, 1012 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3020, 0x4},
1050 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2}, 1013 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4030, 0x2},
1051#define USEM_FUNC2_END 892 1014#define USEM_FUNC2_END 855
1052#define USEM_FUNC3_START 892 1015#define USEM_FUNC3_START 855
1053 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4}, 1016 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3030, 0x4},
1054 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2}, 1017 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4040, 0x2},
1055#define USEM_FUNC3_END 894 1018#define USEM_FUNC3_END 857
1056#define USEM_FUNC4_START 894 1019#define USEM_FUNC4_START 857
1057 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4}, 1020 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3040, 0x4},
1058 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2}, 1021 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4050, 0x2},
1059#define USEM_FUNC4_END 896 1022#define USEM_FUNC4_END 859
1060#define USEM_FUNC5_START 896 1023#define USEM_FUNC5_START 859
1061 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4}, 1024 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3050, 0x4},
1062 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2}, 1025 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4060, 0x2},
1063#define USEM_FUNC5_END 898 1026#define USEM_FUNC5_END 861
1064#define USEM_FUNC6_START 898 1027#define USEM_FUNC6_START 861
1065 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4}, 1028 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3060, 0x4},
1066 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2}, 1029 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4070, 0x2},
1067#define USEM_FUNC6_END 900 1030#define USEM_FUNC6_END 863
1068#define USEM_FUNC7_START 900 1031#define USEM_FUNC7_START 863
1069 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4}, 1032 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x3070, 0x4},
1070 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2}, 1033 {OP_ZR_E1H, USEM_REG_FAST_MEMORY + 0x4080, 0x2},
1071#define USEM_FUNC7_END 902 1034#define USEM_FUNC7_END 865
1072#define CSEM_COMMON_START 902 1035#define CSEM_COMMON_START 865
1073 {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0}, 1036 {OP_RD, CSEM_REG_MSG_NUM_FIC0, 0x0},
1074 {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0}, 1037 {OP_RD, CSEM_REG_MSG_NUM_FIC1, 0x0},
1075 {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0}, 1038 {OP_RD, CSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1128,29 +1091,29 @@ static const struct raw_op init_ops[] = {
1128 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0}, 1091 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x11e8, 0x0},
1129 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240}, 1092 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x25c0, 0x240},
1130 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0}, 1093 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3000, 0xc0},
1131 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a2}, 1094 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x2ec8, 0x802a0},
1132 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80}, 1095 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x4070, 0x80},
1133 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4}, 1096 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x5280, 0x4},
1134 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240}, 1097 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6280, 0x240},
1135 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be}, 1098 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x6b88, 0x2002be},
1136 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff}, 1099 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x13fffff},
1137 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002aa}, 1100 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002a8},
1138 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de}, 1101 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c00, 0x1002de},
1139 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0}, 1102 {OP_WR, CSEM_REG_FAST_MEMORY + 0x10800, 0x0},
1140 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ba}, 1103 {OP_SW_E1, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002b8},
1141 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee}, 1104 {OP_SW_E1H, CSEM_REG_FAST_MEMORY + 0x10c40, 0x1002ee},
1142 {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000}, 1105 {OP_ZP_E1, CSEM_REG_INT_TABLE, 0x6e0000},
1143 {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000}, 1106 {OP_ZP_E1H, CSEM_REG_INT_TABLE, 0x6f0000},
1144 {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002ca}, 1107 {OP_WR_64_E1, CSEM_REG_INT_TABLE + 0x380, 0x1002c8},
1145 {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe}, 1108 {OP_WR_64_E1H, CSEM_REG_INT_TABLE + 0x380, 0x1002fe},
1146 {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000}, 1109 {OP_ZP_E1, CSEM_REG_PRAM, 0x32580000},
1147 {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000}, 1110 {OP_ZP_E1H, CSEM_REG_PRAM, 0x31fa0000},
1148 {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96}, 1111 {OP_ZP_E1, CSEM_REG_PRAM + 0x8000, 0x18270c96},
1149 {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f}, 1112 {OP_ZP_E1H, CSEM_REG_PRAM + 0x8000, 0x19040c7f},
1150 {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402cc}, 1113 {OP_WR_64_E1, CSEM_REG_PRAM + 0xb210, 0x682402ca},
1151 {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300}, 1114 {OP_WR_64_E1H, CSEM_REG_PRAM + 0xb430, 0x67e00300},
1152#define CSEM_COMMON_END 981 1115#define CSEM_COMMON_END 944
1153#define CSEM_PORT0_START 981 1116#define CSEM_PORT0_START 944
1154 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0}, 1117 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1400, 0xa0},
1155 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0}, 1118 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8000, 0xa0},
1156 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10}, 1119 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1900, 0x10},
@@ -1163,8 +1126,8 @@ static const struct raw_op init_ops[] = {
1163 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30}, 1126 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6040, 0x30},
1164 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6}, 1127 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3040, 0x6},
1165 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30}, 1128 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x2410, 0x30},
1166#define CSEM_PORT0_END 993 1129#define CSEM_PORT0_END 956
1167#define CSEM_PORT1_START 993 1130#define CSEM_PORT1_START 956
1168 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0}, 1131 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1680, 0xa0},
1169 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0}, 1132 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x8280, 0xa0},
1170 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10}, 1133 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x1940, 0x10},
@@ -1177,43 +1140,43 @@ static const struct raw_op init_ops[] = {
1177 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30}, 1140 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x6100, 0x30},
1178 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6}, 1141 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x3058, 0x6},
1179 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30}, 1142 {OP_ZR_E1, CSEM_REG_FAST_MEMORY + 0x24d0, 0x30},
1180#define CSEM_PORT1_END 1005 1143#define CSEM_PORT1_END 968
1181#define CSEM_FUNC0_START 1005 1144#define CSEM_FUNC0_START 968
1182 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0}, 1145 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1148, 0x0},
1183 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2}, 1146 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3300, 0x2},
1184#define CSEM_FUNC0_END 1007 1147#define CSEM_FUNC0_END 970
1185#define CSEM_FUNC1_START 1007 1148#define CSEM_FUNC1_START 970
1186 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0}, 1149 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x114c, 0x0},
1187 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2}, 1150 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3308, 0x2},
1188#define CSEM_FUNC1_END 1009 1151#define CSEM_FUNC1_END 972
1189#define CSEM_FUNC2_START 1009 1152#define CSEM_FUNC2_START 972
1190 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0}, 1153 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1150, 0x0},
1191 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2}, 1154 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3310, 0x2},
1192#define CSEM_FUNC2_END 1011 1155#define CSEM_FUNC2_END 974
1193#define CSEM_FUNC3_START 1011 1156#define CSEM_FUNC3_START 974
1194 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0}, 1157 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1154, 0x0},
1195 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2}, 1158 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3318, 0x2},
1196#define CSEM_FUNC3_END 1013 1159#define CSEM_FUNC3_END 976
1197#define CSEM_FUNC4_START 1013 1160#define CSEM_FUNC4_START 976
1198 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0}, 1161 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1158, 0x0},
1199 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2}, 1162 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3320, 0x2},
1200#define CSEM_FUNC4_END 1015 1163#define CSEM_FUNC4_END 978
1201#define CSEM_FUNC5_START 1015 1164#define CSEM_FUNC5_START 978
1202 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0}, 1165 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x115c, 0x0},
1203 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2}, 1166 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3328, 0x2},
1204#define CSEM_FUNC5_END 1017 1167#define CSEM_FUNC5_END 980
1205#define CSEM_FUNC6_START 1017 1168#define CSEM_FUNC6_START 980
1206 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0}, 1169 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1160, 0x0},
1207 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2}, 1170 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3330, 0x2},
1208#define CSEM_FUNC6_END 1019 1171#define CSEM_FUNC6_END 982
1209#define CSEM_FUNC7_START 1019 1172#define CSEM_FUNC7_START 982
1210 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0}, 1173 {OP_WR_E1H, CSEM_REG_FAST_MEMORY + 0x1164, 0x0},
1211 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2}, 1174 {OP_ZR_E1H, CSEM_REG_FAST_MEMORY + 0x3338, 0x2},
1212#define CSEM_FUNC7_END 1021 1175#define CSEM_FUNC7_END 984
1213#define XPB_COMMON_START 1021 1176#define XPB_COMMON_START 984
1214 {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20}, 1177 {OP_WR, GRCBASE_XPB + PB_REG_CONTROL, 0x20},
1215#define XPB_COMMON_END 1022 1178#define XPB_COMMON_END 985
1216#define DQ_COMMON_START 1022 1179#define DQ_COMMON_START 985
1217 {OP_WR, DORQ_REG_MODE_ACT, 0x2}, 1180 {OP_WR, DORQ_REG_MODE_ACT, 0x2},
1218 {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3}, 1181 {OP_WR, DORQ_REG_NORM_CID_OFST, 0x3},
1219 {OP_WR, DORQ_REG_OUTST_REQ, 0x4}, 1182 {OP_WR, DORQ_REG_OUTST_REQ, 0x4},
@@ -1232,8 +1195,8 @@ static const struct raw_op init_ops[] = {
1232 {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c}, 1195 {OP_WR, DORQ_REG_DQ_FIFO_AFULL_TH, 0x76c},
1233 {OP_WR, DORQ_REG_REGN, 0x7c1004}, 1196 {OP_WR, DORQ_REG_REGN, 0x7c1004},
1234 {OP_WR, DORQ_REG_IF_EN, 0xf}, 1197 {OP_WR, DORQ_REG_IF_EN, 0xf},
1235#define DQ_COMMON_END 1040 1198#define DQ_COMMON_END 1003
1236#define TIMERS_COMMON_START 1040 1199#define TIMERS_COMMON_START 1003
1237 {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2}, 1200 {OP_ZR, TM_REG_CLIN_PRIOR0_CLIENT, 0x2},
1238 {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c}, 1201 {OP_WR, TM_REG_LIN_SETCLR_FIFO_ALFULL_THR, 0x1c},
1239 {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1}, 1202 {OP_WR, TM_REG_CFC_AC_CRDCNT_VAL, 0x1},
@@ -1256,14 +1219,14 @@ static const struct raw_op init_ops[] = {
1256 {OP_WR, TM_REG_EN_CL0_INPUT, 0x1}, 1219 {OP_WR, TM_REG_EN_CL0_INPUT, 0x1},
1257 {OP_WR, TM_REG_EN_CL1_INPUT, 0x1}, 1220 {OP_WR, TM_REG_EN_CL1_INPUT, 0x1},
1258 {OP_WR, TM_REG_EN_CL2_INPUT, 0x1}, 1221 {OP_WR, TM_REG_EN_CL2_INPUT, 0x1},
1259#define TIMERS_COMMON_END 1062 1222#define TIMERS_COMMON_END 1025
1260#define TIMERS_PORT0_START 1062 1223#define TIMERS_PORT0_START 1025
1261 {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2}, 1224 {OP_ZR, TM_REG_LIN0_PHY_ADDR, 0x2},
1262#define TIMERS_PORT0_END 1063 1225#define TIMERS_PORT0_END 1026
1263#define TIMERS_PORT1_START 1063 1226#define TIMERS_PORT1_START 1026
1264 {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2}, 1227 {OP_ZR, TM_REG_LIN1_PHY_ADDR, 0x2},
1265#define TIMERS_PORT1_END 1064 1228#define TIMERS_PORT1_END 1027
1266#define XSDM_COMMON_START 1064 1229#define XSDM_COMMON_START 1027
1267 {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614}, 1230 {OP_WR_E1, XSDM_REG_CFC_RSP_START_ADDR, 0x614},
1268 {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424}, 1231 {OP_WR_E1H, XSDM_REG_CFC_RSP_START_ADDR, 0x424},
1269 {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600}, 1232 {OP_WR_E1, XSDM_REG_CMP_COUNTER_START_ADDR, 0x600},
@@ -1311,8 +1274,8 @@ static const struct raw_op init_ops[] = {
1311 {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8}, 1274 {OP_WR_ASIC, XSDM_REG_TIMER_TICK, 0x3e8},
1312 {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1}, 1275 {OP_WR_EMUL, XSDM_REG_TIMER_TICK, 0x1},
1313 {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa}, 1276 {OP_WR_FPGA, XSDM_REG_TIMER_TICK, 0xa},
1314#define XSDM_COMMON_END 1111 1277#define XSDM_COMMON_END 1074
1315#define QM_COMMON_START 1111 1278#define QM_COMMON_START 1074
1316 {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6}, 1279 {OP_WR, QM_REG_ACTCTRINITVAL_0, 0x6},
1317 {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5}, 1280 {OP_WR, QM_REG_ACTCTRINITVAL_1, 0x5},
1318 {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa}, 1281 {OP_WR, QM_REG_ACTCTRINITVAL_2, 0xa},
@@ -1613,8 +1576,8 @@ static const struct raw_op init_ops[] = {
1613 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5}, 1576 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_6, 0x5},
1614 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7}, 1577 {OP_WR_E1H, QM_REG_PQ2PCIFUNC_7, 0x7},
1615 {OP_WR, QM_REG_CMINTEN, 0xff}, 1578 {OP_WR, QM_REG_CMINTEN, 0xff},
1616#define QM_COMMON_END 1411 1579#define QM_COMMON_END 1374
1617#define PBF_COMMON_START 1411 1580#define PBF_COMMON_START 1374
1618 {OP_WR, PBF_REG_INIT, 0x1}, 1581 {OP_WR, PBF_REG_INIT, 0x1},
1619 {OP_WR, PBF_REG_INIT_P4, 0x1}, 1582 {OP_WR, PBF_REG_INIT_P4, 0x1},
1620 {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1}, 1583 {OP_WR, PBF_REG_MAC_LB_ENABLE, 0x1},
@@ -1622,20 +1585,20 @@ static const struct raw_op init_ops[] = {
1622 {OP_WR, PBF_REG_INIT_P4, 0x0}, 1585 {OP_WR, PBF_REG_INIT_P4, 0x0},
1623 {OP_WR, PBF_REG_INIT, 0x0}, 1586 {OP_WR, PBF_REG_INIT, 0x0},
1624 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0}, 1587 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P4, 0x0},
1625#define PBF_COMMON_END 1418 1588#define PBF_COMMON_END 1381
1626#define PBF_PORT0_START 1418 1589#define PBF_PORT0_START 1381
1627 {OP_WR, PBF_REG_INIT_P0, 0x1}, 1590 {OP_WR, PBF_REG_INIT_P0, 0x1},
1628 {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1}, 1591 {OP_WR, PBF_REG_MAC_IF0_ENABLE, 0x1},
1629 {OP_WR, PBF_REG_INIT_P0, 0x0}, 1592 {OP_WR, PBF_REG_INIT_P0, 0x0},
1630 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0}, 1593 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P0, 0x0},
1631#define PBF_PORT0_END 1422 1594#define PBF_PORT0_END 1385
1632#define PBF_PORT1_START 1422 1595#define PBF_PORT1_START 1385
1633 {OP_WR, PBF_REG_INIT_P1, 0x1}, 1596 {OP_WR, PBF_REG_INIT_P1, 0x1},
1634 {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1}, 1597 {OP_WR, PBF_REG_MAC_IF1_ENABLE, 0x1},
1635 {OP_WR, PBF_REG_INIT_P1, 0x0}, 1598 {OP_WR, PBF_REG_INIT_P1, 0x0},
1636 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0}, 1599 {OP_WR, PBF_REG_DISABLE_NEW_TASK_PROC_P1, 0x0},
1637#define PBF_PORT1_END 1426 1600#define PBF_PORT1_END 1389
1638#define XCM_COMMON_START 1426 1601#define XCM_COMMON_START 1389
1639 {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32}, 1602 {OP_WR, XCM_REG_XX_OVFL_EVNT_ID, 0x32},
1640 {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020}, 1603 {OP_WR, XCM_REG_XQM_XCM_HDR_P, 0x3150020},
1641 {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020}, 1604 {OP_WR, XCM_REG_XQM_XCM_HDR_S, 0x3150020},
@@ -1670,7 +1633,7 @@ static const struct raw_op init_ops[] = {
1670 {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f}, 1633 {OP_WR_E1, XCM_REG_XX_MSG_NUM, 0x1f},
1671 {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20}, 1634 {OP_WR_E1H, XCM_REG_XX_MSG_NUM, 0x20},
1672 {OP_ZR, XCM_REG_XX_TABLE, 0x12}, 1635 {OP_ZR, XCM_REG_XX_TABLE, 0x12},
1673 {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02ce}, 1636 {OP_SW_E1, XCM_REG_XX_DESCR_TABLE, 0x1f02cc},
1674 {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302}, 1637 {OP_SW_E1H, XCM_REG_XX_DESCR_TABLE, 0x1f0302},
1675 {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf}, 1638 {OP_WR, XCM_REG_N_SM_CTX_LD_0, 0xf},
1676 {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7}, 1639 {OP_WR, XCM_REG_N_SM_CTX_LD_1, 0x7},
@@ -1700,8 +1663,8 @@ static const struct raw_op init_ops[] = {
1700 {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1}, 1663 {OP_WR, XCM_REG_CDU_SM_WR_IFEN, 0x1},
1701 {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1}, 1664 {OP_WR, XCM_REG_CDU_SM_RD_IFEN, 0x1},
1702 {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1}, 1665 {OP_WR, XCM_REG_XCM_CFC_IFEN, 0x1},
1703#define XCM_COMMON_END 1490 1666#define XCM_COMMON_END 1453
1704#define XCM_PORT0_START 1490 1667#define XCM_PORT0_START 1453
1705 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1668 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1706 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1669 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1707 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1670 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1710,8 +1673,8 @@ static const struct raw_op init_ops[] = {
1710 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2}, 1673 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD10, 0x2},
1711 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1674 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1712 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1675 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1713#define XCM_PORT0_END 1498 1676#define XCM_PORT0_END 1461
1714#define XCM_PORT1_START 1498 1677#define XCM_PORT1_START 1461
1715 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1678 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1716 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1679 {OP_WR_E1, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1717 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1680 {OP_WR_E1, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1720,8 +1683,8 @@ static const struct raw_op init_ops[] = {
1720 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2}, 1683 {OP_WR_E1, XCM_REG_WU_DA_CNT_CMD11, 0x2},
1721 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1684 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1722 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1685 {OP_WR_E1, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1723#define XCM_PORT1_END 1506 1686#define XCM_PORT1_END 1469
1724#define XCM_FUNC0_START 1506 1687#define XCM_FUNC0_START 1469
1725 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1688 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1726 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1689 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1727 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1690 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1731,8 +1694,8 @@ static const struct raw_op init_ops[] = {
1731 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1694 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1732 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1695 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1733 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1696 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1734#define XCM_FUNC0_END 1515 1697#define XCM_FUNC0_END 1478
1735#define XCM_FUNC1_START 1515 1698#define XCM_FUNC1_START 1478
1736 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1699 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1737 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1700 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1738 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1701 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1742,8 +1705,8 @@ static const struct raw_op init_ops[] = {
1742 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1705 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1743 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1706 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1744 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1707 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1745#define XCM_FUNC1_END 1524 1708#define XCM_FUNC1_END 1487
1746#define XCM_FUNC2_START 1524 1709#define XCM_FUNC2_START 1487
1747 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1710 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1748 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1711 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1749 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1712 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1753,8 +1716,8 @@ static const struct raw_op init_ops[] = {
1753 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1716 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1754 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1717 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1755 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1718 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1756#define XCM_FUNC2_END 1533 1719#define XCM_FUNC2_END 1496
1757#define XCM_FUNC3_START 1533 1720#define XCM_FUNC3_START 1496
1758 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1721 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1759 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1722 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1760 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1723 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1764,8 +1727,8 @@ static const struct raw_op init_ops[] = {
1764 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1727 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1765 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1728 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1766 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1729 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1767#define XCM_FUNC3_END 1542 1730#define XCM_FUNC3_END 1505
1768#define XCM_FUNC4_START 1542 1731#define XCM_FUNC4_START 1505
1769 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1732 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1770 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1733 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1771 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1734 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1775,8 +1738,8 @@ static const struct raw_op init_ops[] = {
1775 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1738 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1776 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1739 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1777 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1740 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1778#define XCM_FUNC4_END 1551 1741#define XCM_FUNC4_END 1514
1779#define XCM_FUNC5_START 1551 1742#define XCM_FUNC5_START 1514
1780 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1743 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1781 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1744 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1782 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1745 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1786,8 +1749,8 @@ static const struct raw_op init_ops[] = {
1786 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1749 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1787 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1750 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1788 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1751 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1789#define XCM_FUNC5_END 1560 1752#define XCM_FUNC5_END 1523
1790#define XCM_FUNC6_START 1560 1753#define XCM_FUNC6_START 1523
1791 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8}, 1754 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_0, 0xc8},
1792 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2}, 1755 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 0x2},
1793 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0}, 1756 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 0x0},
@@ -1797,8 +1760,8 @@ static const struct raw_op init_ops[] = {
1797 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff}, 1760 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL00, 0xff},
1798 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff}, 1761 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL10, 0xff},
1799 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0}, 1762 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_0, 0x0},
1800#define XCM_FUNC6_END 1569 1763#define XCM_FUNC6_END 1532
1801#define XCM_FUNC7_START 1569 1764#define XCM_FUNC7_START 1532
1802 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8}, 1765 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_TMR_VAL_1, 0xc8},
1803 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2}, 1766 {OP_WR_E1H, XCM_REG_GLB_DEL_ACK_MAX_CNT_1, 0x2},
1804 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0}, 1767 {OP_WR_E1H, XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01, 0x0},
@@ -1808,8 +1771,8 @@ static const struct raw_op init_ops[] = {
1808 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff}, 1771 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL01, 0xff},
1809 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff}, 1772 {OP_WR_E1H, XCM_REG_WU_DA_CNT_UPD_VAL11, 0xff},
1810 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0}, 1773 {OP_WR_E1H, XCM_REG_PHYS_QNUM3_1, 0x0},
1811#define XCM_FUNC7_END 1578 1774#define XCM_FUNC7_END 1541
1812#define XSEM_COMMON_START 1578 1775#define XSEM_COMMON_START 1541
1813 {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0}, 1776 {OP_RD, XSEM_REG_MSG_NUM_FIC0, 0x0},
1814 {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0}, 1777 {OP_RD, XSEM_REG_MSG_NUM_FIC1, 0x0},
1815 {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0}, 1778 {OP_RD, XSEM_REG_MSG_NUM_FOC0, 0x0},
@@ -1876,9 +1839,9 @@ static const struct raw_op init_ops[] = {
1876 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2}, 1839 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x9000, 0x2},
1877 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0}, 1840 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3368, 0x0},
1878 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86}, 1841 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x21a8, 0x86},
1879 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202ed}, 1842 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3370, 0x202eb},
1880 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20}, 1843 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2000, 0x20},
1881 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ef}, 1844 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3b90, 0x402ed},
1882 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0}, 1845 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x23c8, 0x0},
1883 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1}, 1846 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1518, 0x1},
1884 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321}, 1847 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x23d0, 0x20321},
@@ -1886,29 +1849,29 @@ static const struct raw_op init_ops[] = {
1886 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323}, 1849 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2498, 0x40323},
1887 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0}, 1850 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1838, 0x0},
1888 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0}, 1851 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ac8, 0x0},
1889 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f3}, 1852 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1820, 0x202f1},
1890 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0}, 1853 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x2ab8, 0x0},
1891 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2}, 1854 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4ac0, 0x2},
1892 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1}, 1855 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0x3010, 0x1},
1893 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4}, 1856 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b00, 0x4},
1894 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10}, 1857 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x4040, 0x10},
1895 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f5}, 1858 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x1f50, 0x202f3},
1896 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327}, 1859 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x4000, 0x100327},
1897 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2}, 1860 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6ac0, 0x2},
1898 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4}, 1861 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b00, 0x4},
1899 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337}, 1862 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x83b0, 0x20337},
1900 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0}, 1863 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x0},
1901 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f7}, 1864 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c00, 0x1002f5},
1902 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339}, 1865 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c00, 0x100339},
1903 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000}, 1866 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x1000000},
1904 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80307}, 1867 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80305},
1905 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349}, 1868 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c40, 0x80349},
1906 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000}, 1869 {OP_WR, XSEM_REG_FAST_MEMORY + 0x10800, 0x2000000},
1907 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030f}, 1870 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x10c60, 0x8030d},
1908 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351}, 1871 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x10c60, 0x80351},
1909 {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000}, 1872 {OP_ZP_E1, XSEM_REG_INT_TABLE, 0xa90000},
1910 {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000}, 1873 {OP_ZP_E1H, XSEM_REG_INT_TABLE, 0xac0000},
1911 {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130317}, 1874 {OP_WR_64_E1, XSEM_REG_INT_TABLE + 0x368, 0x130315},
1912 {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359}, 1875 {OP_WR_64_E1H, XSEM_REG_INT_TABLE + 0x368, 0x130359},
1913 {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000}, 1876 {OP_ZP_E1, XSEM_REG_PRAM, 0x344e0000},
1914 {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000}, 1877 {OP_ZP_E1H, XSEM_REG_PRAM, 0x34620000},
@@ -1918,10 +1881,10 @@ static const struct raw_op init_ops[] = {
1918 {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22}, 1881 {OP_ZP_E1H, XSEM_REG_PRAM + 0x10000, 0x3e971b22},
1919 {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2}, 1882 {OP_ZP_E1, XSEM_REG_PRAM + 0x18000, 0x1dd02ad2},
1920 {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8}, 1883 {OP_ZP_E1H, XSEM_REG_PRAM + 0x18000, 0x21542ac8},
1921 {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60319}, 1884 {OP_WR_64_E1, XSEM_REG_PRAM + 0x1c0d0, 0x47e60317},
1922 {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b}, 1885 {OP_WR_64_E1H, XSEM_REG_PRAM + 0x1c8d0, 0x46e6035b},
1923#define XSEM_COMMON_END 1688 1886#define XSEM_COMMON_END 1651
1924#define XSEM_PORT0_START 1688 1887#define XSEM_PORT0_START 1651
1925 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10}, 1888 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3ba0, 0x10},
1926 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc}, 1889 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc000, 0xfc},
1927 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c}, 1890 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c20, 0x1c},
@@ -1934,7 +1897,7 @@ static const struct raw_op init_ops[] = {
1934 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c}, 1897 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x26e8, 0x1c},
1935 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0}, 1898 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b58, 0x0},
1936 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c}, 1899 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x27c8, 0x1c},
1937 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x10031b}, 1900 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d10, 0x100319},
1938 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28}, 1901 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa000, 0x28},
1939 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0}, 1902 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1500, 0x0},
1940 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc}, 1903 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa140, 0xc},
@@ -1950,12 +1913,12 @@ static const struct raw_op init_ops[] = {
1950 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d}, 1913 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ac8, 0x2035d},
1951 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1}, 1914 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50b8, 0x1},
1952 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42}, 1915 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6b10, 0x42},
1953 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x2032b}, 1916 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ac8, 0x20329},
1954 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4}, 1917 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d20, 0x4},
1955 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42}, 1918 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4b10, 0x42},
1956 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4}, 1919 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d20, 0x4},
1957#define XSEM_PORT0_END 1720 1920#define XSEM_PORT0_END 1683
1958#define XSEM_PORT1_START 1720 1921#define XSEM_PORT1_START 1683
1959 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10}, 1922 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3be0, 0x10},
1960 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc}, 1923 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xc3f0, 0xfc},
1961 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c}, 1924 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x3c90, 0x1c},
@@ -1968,7 +1931,7 @@ static const struct raw_op init_ops[] = {
1968 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c}, 1931 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2758, 0x1c},
1969 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0}, 1932 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x3b5c, 0x0},
1970 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c}, 1933 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x2838, 0x1c},
1971 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032d}, 1934 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x3d50, 0x10032b},
1972 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28}, 1935 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa0a0, 0x28},
1973 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0}, 1936 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x1504, 0x0},
1974 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc}, 1937 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0xa170, 0xc},
@@ -1984,65 +1947,65 @@ static const struct raw_op init_ops[] = {
1984 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f}, 1947 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x6ad0, 0x2035f},
1985 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1}, 1948 {OP_WR_E1, XSEM_REG_FAST_MEMORY + 0x50bc, 0x1},
1986 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42}, 1949 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6c18, 0x42},
1987 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033d}, 1950 {OP_SW_E1, XSEM_REG_FAST_MEMORY + 0x4ad0, 0x2033b},
1988 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4}, 1951 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x6d30, 0x4},
1989 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42}, 1952 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4c18, 0x42},
1990 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4}, 1953 {OP_ZR_E1, XSEM_REG_FAST_MEMORY + 0x4d30, 0x4},
1991#define XSEM_PORT1_END 1752 1954#define XSEM_PORT1_END 1715
1992#define XSEM_FUNC0_START 1752 1955#define XSEM_FUNC0_START 1715
1993 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0}, 1956 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e0, 0x0},
1994 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361}, 1957 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28b8, 0x100361},
1995 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe}, 1958 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5048, 0xe},
1996#define XSEM_FUNC0_END 1755 1959#define XSEM_FUNC0_END 1718
1997#define XSEM_FUNC1_START 1755 1960#define XSEM_FUNC1_START 1718
1998 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0}, 1961 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e4, 0x0},
1999 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371}, 1962 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x28f8, 0x100371},
2000 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe}, 1963 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5080, 0xe},
2001#define XSEM_FUNC1_END 1758 1964#define XSEM_FUNC1_END 1721
2002#define XSEM_FUNC2_START 1758 1965#define XSEM_FUNC2_START 1721
2003 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0}, 1966 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7e8, 0x0},
2004 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381}, 1967 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2938, 0x100381},
2005 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe}, 1968 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50b8, 0xe},
2006#define XSEM_FUNC2_END 1761 1969#define XSEM_FUNC2_END 1724
2007#define XSEM_FUNC3_START 1761 1970#define XSEM_FUNC3_START 1724
2008 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0}, 1971 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7ec, 0x0},
2009 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391}, 1972 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2978, 0x100391},
2010 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe}, 1973 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x50f0, 0xe},
2011#define XSEM_FUNC3_END 1764 1974#define XSEM_FUNC3_END 1727
2012#define XSEM_FUNC4_START 1764 1975#define XSEM_FUNC4_START 1727
2013 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0}, 1976 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f0, 0x0},
2014 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1}, 1977 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29b8, 0x1003a1},
2015 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe}, 1978 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5128, 0xe},
2016#define XSEM_FUNC4_END 1767 1979#define XSEM_FUNC4_END 1730
2017#define XSEM_FUNC5_START 1767 1980#define XSEM_FUNC5_START 1730
2018 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0}, 1981 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f4, 0x0},
2019 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1}, 1982 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x29f8, 0x1003b1},
2020 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe}, 1983 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5160, 0xe},
2021#define XSEM_FUNC5_END 1770 1984#define XSEM_FUNC5_END 1733
2022#define XSEM_FUNC6_START 1770 1985#define XSEM_FUNC6_START 1733
2023 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0}, 1986 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7f8, 0x0},
2024 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1}, 1987 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a38, 0x1003c1},
2025 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe}, 1988 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x5198, 0xe},
2026#define XSEM_FUNC6_END 1773 1989#define XSEM_FUNC6_END 1736
2027#define XSEM_FUNC7_START 1773 1990#define XSEM_FUNC7_START 1736
2028 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0}, 1991 {OP_WR_E1H, XSEM_REG_FAST_MEMORY + 0xc7fc, 0x0},
2029 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1}, 1992 {OP_SW_E1H, XSEM_REG_FAST_MEMORY + 0x2a78, 0x1003d1},
2030 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe}, 1993 {OP_ZR_E1H, XSEM_REG_FAST_MEMORY + 0x51d0, 0xe},
2031#define XSEM_FUNC7_END 1776 1994#define XSEM_FUNC7_END 1739
2032#define CDU_COMMON_START 1776 1995#define CDU_COMMON_START 1739
2033 {OP_WR, CDU_REG_CDU_CONTROL0, 0x1}, 1996 {OP_WR, CDU_REG_CDU_CONTROL0, 0x1},
2034 {OP_WR_E1H, CDU_REG_MF_MODE, 0x1}, 1997 {OP_WR_E1H, CDU_REG_MF_MODE, 0x1},
2035 {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000}, 1998 {OP_WR, CDU_REG_CDU_CHK_MASK0, 0x3d000},
2036 {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d}, 1999 {OP_WR, CDU_REG_CDU_CHK_MASK1, 0x3d},
2037 {OP_WB_E1, CDU_REG_L1TT, 0x200033f}, 2000 {OP_WB_E1, CDU_REG_L1TT, 0x200033d},
2038 {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1}, 2001 {OP_WB_E1H, CDU_REG_L1TT, 0x20003e1},
2039 {OP_WB_E1, CDU_REG_MATT, 0x20053f}, 2002 {OP_WB_E1, CDU_REG_MATT, 0x20053d},
2040 {OP_WB_E1H, CDU_REG_MATT, 0x2805e1}, 2003 {OP_WB_E1H, CDU_REG_MATT, 0x2805e1},
2041 {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2}, 2004 {OP_ZR_E1, CDU_REG_MATT + 0x80, 0x2},
2042 {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055f}, 2005 {OP_WB_E1, CDU_REG_MATT + 0x88, 0x6055d},
2043 {OP_ZR, CDU_REG_MATT + 0xa0, 0x18}, 2006 {OP_ZR, CDU_REG_MATT + 0xa0, 0x18},
2044#define CDU_COMMON_END 1787 2007#define CDU_COMMON_END 1750
2045#define DMAE_COMMON_START 1787 2008#define DMAE_COMMON_START 1750
2046 {OP_ZR, DMAE_REG_CMD_MEM, 0xe0}, 2009 {OP_ZR, DMAE_REG_CMD_MEM, 0xe0},
2047 {OP_WR, DMAE_REG_CRC16C_INIT, 0x0}, 2010 {OP_WR, DMAE_REG_CRC16C_INIT, 0x0},
2048 {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1}, 2011 {OP_WR, DMAE_REG_CRC16T10_INIT, 0x1},
@@ -2050,24 +2013,24 @@ static const struct raw_op init_ops[] = {
2050 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2}, 2013 {OP_WR_E1H, DMAE_REG_PXP_REQ_INIT_CRD, 0x2},
2051 {OP_WR, DMAE_REG_PCI_IFEN, 0x1}, 2014 {OP_WR, DMAE_REG_PCI_IFEN, 0x1},
2052 {OP_WR, DMAE_REG_GRC_IFEN, 0x1}, 2015 {OP_WR, DMAE_REG_GRC_IFEN, 0x1},
2053#define DMAE_COMMON_END 1794 2016#define DMAE_COMMON_END 1757
2054#define PXP_COMMON_START 1794 2017#define PXP_COMMON_START 1757
2055 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50565}, 2018 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x400, 0x50563},
2056 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609}, 2019 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT + 0x400, 0x50609},
2057 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x5056a}, 2020 {OP_WB_E1, PXP_REG_HST_INBOUND_INT + 0x420, 0x50568},
2058 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e}, 2021 {OP_WB_E1H, PXP_REG_HST_INBOUND_INT, 0x5060e},
2059 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056f}, 2022 {OP_WB_E1, PXP_REG_HST_INBOUND_INT, 0x5056d},
2060#define PXP_COMMON_END 1799 2023#define PXP_COMMON_END 1762
2061#define CFC_COMMON_START 1799 2024#define CFC_COMMON_START 1762
2062 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100}, 2025 {OP_ZR_E1H, CFC_REG_LINK_LIST, 0x100},
2063 {OP_WR, CFC_REG_CONTROL0, 0x10}, 2026 {OP_WR, CFC_REG_CONTROL0, 0x10},
2064 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff}, 2027 {OP_WR, CFC_REG_DISABLE_ON_ERROR, 0x3fff},
2065 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a}, 2028 {OP_WR, CFC_REG_LCREQ_WEIGHTS, 0x84924a},
2066#define CFC_COMMON_END 1803 2029#define CFC_COMMON_END 1766
2067#define HC_COMMON_START 1803 2030#define HC_COMMON_START 1766
2068 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4}, 2031 {OP_ZR_E1, HC_REG_USTORM_ADDR_FOR_COALESCE, 0x4},
2069#define HC_COMMON_END 1804 2032#define HC_COMMON_END 1767
2070#define HC_PORT0_START 1804 2033#define HC_PORT0_START 1767
2071 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080}, 2034 {OP_WR_E1, HC_REG_CONFIG_0, 0x1080},
2072 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2}, 2035 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_0, 0x2},
2073 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10}, 2036 {OP_WR_E1, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2086,8 +2049,8 @@ static const struct raw_op init_ops[] = {
2086 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2049 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2087 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2050 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2088 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2051 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2089#define HC_PORT0_END 1822 2052#define HC_PORT0_END 1785
2090#define HC_PORT1_START 1822 2053#define HC_PORT1_START 1785
2091 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080}, 2054 {OP_WR_E1, HC_REG_CONFIG_1, 0x1080},
2092 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2}, 2055 {OP_ZR_E1, HC_REG_UC_RAM_ADDR_1, 0x2},
2093 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10}, 2056 {OP_WR_E1, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2106,8 +2069,8 @@ static const struct raw_op init_ops[] = {
2106 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2069 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2107 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2070 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2108 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2071 {OP_ZR_E1, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2109#define HC_PORT1_END 1840 2072#define HC_PORT1_END 1803
2110#define HC_FUNC0_START 1840 2073#define HC_FUNC0_START 1803
2111 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2074 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2112 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0}, 2075 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x0},
2113 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2076 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2123,8 +2086,8 @@ static const struct raw_op init_ops[] = {
2123 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2086 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2124 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2087 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2125 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2088 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2126#define HC_FUNC0_END 1855 2089#define HC_FUNC0_END 1818
2127#define HC_FUNC1_START 1855 2090#define HC_FUNC1_START 1818
2128 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2091 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2129 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1}, 2092 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x1},
2130 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2093 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2140,8 +2103,8 @@ static const struct raw_op init_ops[] = {
2140 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2103 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2141 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2104 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2142 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2105 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2143#define HC_FUNC1_END 1870 2106#define HC_FUNC1_END 1833
2144#define HC_FUNC2_START 1870 2107#define HC_FUNC2_START 1833
2145 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2108 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2146 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2}, 2109 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x2},
2147 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2110 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2157,8 +2120,8 @@ static const struct raw_op init_ops[] = {
2157 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2120 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2158 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2121 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2159 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2122 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2160#define HC_FUNC2_END 1885 2123#define HC_FUNC2_END 1848
2161#define HC_FUNC3_START 1885 2124#define HC_FUNC3_START 1848
2162 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2125 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2163 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3}, 2126 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x3},
2164 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2127 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2174,8 +2137,8 @@ static const struct raw_op init_ops[] = {
2174 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2137 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2175 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2138 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2176 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2139 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2177#define HC_FUNC3_END 1900 2140#define HC_FUNC3_END 1863
2178#define HC_FUNC4_START 1900 2141#define HC_FUNC4_START 1863
2179 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2142 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2180 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4}, 2143 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x4},
2181 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2144 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2191,8 +2154,8 @@ static const struct raw_op init_ops[] = {
2191 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2154 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2192 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2155 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2193 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2156 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2194#define HC_FUNC4_END 1915 2157#define HC_FUNC4_END 1878
2195#define HC_FUNC5_START 1915 2158#define HC_FUNC5_START 1878
2196 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2159 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2197 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5}, 2160 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x5},
2198 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2161 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2208,8 +2171,8 @@ static const struct raw_op init_ops[] = {
2208 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2171 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2209 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2172 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2210 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2173 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2211#define HC_FUNC5_END 1930 2174#define HC_FUNC5_END 1893
2212#define HC_FUNC6_START 1930 2175#define HC_FUNC6_START 1893
2213 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080}, 2176 {OP_WR_E1H, HC_REG_CONFIG_0, 0x1080},
2214 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6}, 2177 {OP_WR_E1H, HC_REG_FUNC_NUM_P0, 0x6},
2215 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10}, 2178 {OP_WR_E1H, HC_REG_ATTN_NUM_P0, 0x10},
@@ -2225,8 +2188,8 @@ static const struct raw_op init_ops[] = {
2225 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a}, 2188 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x120, 0x4a},
2226 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a}, 2189 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x370, 0x4a},
2227 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a}, 2190 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x5c0, 0x4a},
2228#define HC_FUNC6_END 1945 2191#define HC_FUNC6_END 1908
2229#define HC_FUNC7_START 1945 2192#define HC_FUNC7_START 1908
2230 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080}, 2193 {OP_WR_E1H, HC_REG_CONFIG_1, 0x1080},
2231 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7}, 2194 {OP_WR_E1H, HC_REG_FUNC_NUM_P1, 0x7},
2232 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10}, 2195 {OP_WR_E1H, HC_REG_ATTN_NUM_P1, 0x10},
@@ -2242,8 +2205,8 @@ static const struct raw_op init_ops[] = {
2242 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a}, 2205 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x248, 0x4a},
2243 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a}, 2206 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x498, 0x4a},
2244 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a}, 2207 {OP_ZR_E1H, HC_REG_STATISTIC_COUNTERS + 0x6e8, 0x4a},
2245#define HC_FUNC7_END 1960 2208#define HC_FUNC7_END 1923
2246#define PXP2_COMMON_START 1960 2209#define PXP2_COMMON_START 1923
2247 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2210 {OP_WR_E1, PXP2_REG_PGL_CONTROL0, 0xe38340},
2248 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1}, 2211 {OP_WR_E1H, PXP2_REG_RQ_DRAM_ALIGN, 0x1},
2249 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10}, 2212 {OP_WR, PXP2_REG_PGL_CONTROL1, 0x3c10},
@@ -2361,8 +2324,8 @@ static const struct raw_op init_ops[] = {
2361 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1}, 2324 {OP_WR_E1H, PXP2_REG_RQ_ILT_MODE, 0x1},
2362 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1}, 2325 {OP_WR, PXP2_REG_RQ_RBC_DONE, 0x1},
2363 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340}, 2326 {OP_WR_E1H, PXP2_REG_PGL_CONTROL0, 0xe38340},
2364#define PXP2_COMMON_END 2077 2327#define PXP2_COMMON_END 2040
2365#define MISC_AEU_COMMON_START 2077 2328#define MISC_AEU_COMMON_START 2040
2366 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16}, 2329 {OP_ZR, MISC_REG_AEU_GENERAL_ATTN_0, 0x16},
2367 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000}, 2330 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_NIG_0, 0x55540000},
2368 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555}, 2331 {OP_WR_E1H, MISC_REG_AEU_ENABLE2_NIG_0, 0x55555555},
@@ -2382,8 +2345,8 @@ static const struct raw_op init_ops[] = {
2382 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0}, 2345 {OP_WR_E1H, MISC_REG_AEU_ENABLE4_PXP_1, 0x0},
2383 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00}, 2346 {OP_WR_E1H, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0xc00},
2384 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3}, 2347 {OP_WR_E1H, MISC_REG_AEU_GENERAL_MASK, 0x3},
2385#define MISC_AEU_COMMON_END 2096 2348#define MISC_AEU_COMMON_END 2059
2386#define MISC_AEU_PORT0_START 2096 2349#define MISC_AEU_PORT0_START 2059
2387 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000}, 2350 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xbf5c0000},
2388 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000}, 2351 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, 0xff5c0000},
2389 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef}, 2352 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0, 0xfff51fef},
@@ -2416,8 +2379,8 @@ static const struct raw_op init_ops[] = {
2416 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0}, 2379 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_0, 0x0},
2417 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3}, 2380 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_0, 0x3},
2418 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7}, 2381 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_0, 0x7},
2419#define MISC_AEU_PORT0_END 2128 2382#define MISC_AEU_PORT0_END 2091
2420#define MISC_AEU_PORT1_START 2128 2383#define MISC_AEU_PORT1_START 2091
2421 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000}, 2384 {OP_WR_E1, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xbf5c0000},
2422 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000}, 2385 {OP_WR_E1H, MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0, 0xff5c0000},
2423 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef}, 2386 {OP_WR_E1, MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0, 0xfff51fef},
@@ -2450,7 +2413,7 @@ static const struct raw_op init_ops[] = {
2450 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0}, 2413 {OP_WR_E1, MISC_REG_AEU_INVERTER_1_FUNC_1, 0x0},
2451 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3}, 2414 {OP_ZR_E1, MISC_REG_AEU_INVERTER_2_FUNC_1, 0x3},
2452 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7}, 2415 {OP_WR_E1, MISC_REG_AEU_MASK_ATTN_FUNC_1, 0x7},
2453#define MISC_AEU_PORT1_END 2160 2416#define MISC_AEU_PORT1_END 2123
2454 2417
2455}; 2418};
2456 2419
@@ -2560,103 +2523,92 @@ static const u32 init_data_e1[] = {
2560 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80, 2523 0x00049c00, 0x00051f80, 0x0005a300, 0x00062680, 0x0006aa00, 0x00072d80,
2561 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280, 2524 0x0007b100, 0x00083480, 0x0008b800, 0x00093b80, 0x0009bf00, 0x000a4280,
2562 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780, 2525 0x000ac600, 0x000b4980, 0x000bcd00, 0x000c5080, 0x000cd400, 0x000d5780,
2563 0x000ddb00, 0x00001900, 0x00000028, 0x00000000, 0x00100000, 0x00000000, 2526 0x000ddb00, 0x00001900, 0x00100000, 0x00000000, 0x00000000, 0xffffffff,
2564 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2565 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2527 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2566 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2528 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2567 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2529 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2568 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2530 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2569 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2570 0x00000000, 0x00001500, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2571 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x40000000, 0x40000000,
2572 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2531 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2532 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000, 0x00001500,
2533 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
2534 0xffffffff, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2573 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2535 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2574 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2536 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2575 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2537 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2576 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2538 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2577 0x00000000, 0x00007ff8, 0x00000000, 0x00003500, 0x00001000, 0x00002080, 2539 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8,
2578 0x00003100, 0x00004180, 0x00005200, 0x00006280, 0x00007300, 0x00008380, 2540 0x00000000, 0x00003500, 0x00001000, 0x00002080, 0x00003100, 0x00004180,
2579 0x00009400, 0x0000a480, 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 2541 0x00005200, 0x00006280, 0x00007300, 0x00008380, 0x00009400, 0x0000a480,
2580 0x0000f700, 0x00010780, 0x00011800, 0x00012880, 0x00013900, 0x00014980, 2542 0x0000b500, 0x0000c580, 0x0000d600, 0x0000e680, 0x0000f700, 0x00010780,
2581 0x00015a00, 0x00016a80, 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 2543 0x00011800, 0x00012880, 0x00013900, 0x00014980, 0x00015a00, 0x00016a80,
2582 0x0001bd00, 0x0001cd80, 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 2544 0x00017b00, 0x00018b80, 0x00019c00, 0x0001ac80, 0x0001bd00, 0x0001cd80,
2583 0x00010001, 0x00000604, 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 2545 0x0001de00, 0x0001ee80, 0x0001ff00, 0x00000000, 0x00010001, 0x00000604,
2584 0xcccccccc, 0x00000000, 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 2546 0xccccccc1, 0xffffffff, 0xffffffff, 0xcccc0201, 0xcccccccc, 0x00000000,
2547 0xffffffff, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2585 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2548 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2586 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2549 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2587 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2550 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2588 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 2551 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000,
2589 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x40000000, 0x00000000, 2552 0x40000000, 0x40000000, 0x40000000, 0x00000000, 0x00007ff8, 0x00000000,
2590 0x00007ff8, 0x00000000, 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 2553 0x00003500, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2591 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2554 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2555 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2592 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2556 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2593 0x00000000, 0x00100000, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2594 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2557 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff,
2595 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 2558 0x00000000, 0x0000ffff, 0x00000000, 0x0000ffff, 0x00000000, 0x00100000,
2596 0x00000000, 0x00100000, 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2559 0x00000000, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2560 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x30efffff, 0x0c30c30c,
2598 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2599 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2600 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2601 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2602 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2603 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2604 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c,
2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2606 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2607 0xcdcdcdcd, 0xfffffff3, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2608 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2561 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2610 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2562 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2611 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2563 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2612 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2564 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2613 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2565 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa,
2614 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2566 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c,
2615 0xcdcdcdcd, 0xfffffff7, 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2567 0xcdcdcdcd, 0xfffffff7, 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2616 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 2568 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x302fffff, 0x0c30c30c,
2617 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 2569 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2618 0x31efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 2570 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2619 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2571 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2620 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 2572 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2621 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 2573 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2622 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 2574 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2623 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2575 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2624 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 2576 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2625 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 2577 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xfffffff7,
2626 0x056fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 2578 0x30efffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0020cf3c,
2627 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2579 0xcdcdcdcd, 0xfffffff5, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2628 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 2580 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3, 0x31efffff, 0x0c30c30c,
2629 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 2581 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1,
2630 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 2582 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c,
2631 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2583 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2632 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 2584 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305,
2633 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 2585 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2,
2634 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 2586 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c,
2635 0xcdcdcdcd, 0xffffff8a, 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2587 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2636 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 2588 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97, 0x056fffff, 0x0c30c30c,
2637 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 2589 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5,
2638 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 2590 0x310fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c,
2639 0xcdcdcdcd, 0xfffffff3, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 2591 0xcdcdcdcd, 0xfffffff3, 0x320fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2640 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 2592 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xfffffff1, 0x310fffff, 0x0c30c30c,
2641 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 2593 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6,
2642 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 2594 0x305fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c,
2643 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 2595 0xcdcdcdcd, 0xfffff406, 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014,
2644 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 2596 0xf3cf3cf3, 0x0004cf3c, 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c,
2645 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 2597 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffff8a,
2646 0x302fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 2598 0x042fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0010cf3c,
2647 0xcdcdcdcd, 0xffffff97, 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 2599 0xcdcdcdcd, 0xffffff97, 0x05cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000,
2648 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 2600 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xfffffff5, 0x310fffff, 0x0c30c30c,
2649 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 2601 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xfffffff3,
2650 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 2602 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0000cf3c,
2651 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2603 0xcdcdcdcd, 0xfffffff1, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2652 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2604 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xfffffff6, 0x305fffff, 0x0c30c30c,
2653 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff, 2605 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xfffff406,
2654 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c, 2606 0x1cbfffff, 0x0c30c305, 0xc30c30c3, 0xcf300014, 0xf3cf3cf3, 0x0004cf3c,
2655 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2607 0xcdcdcdcd, 0xfffffff2, 0x304fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2656 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2608 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xfffffffa, 0x302fffff, 0x0c30c30c,
2657 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff, 2609 0xc30c30c3, 0xcf3cf300, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffff97,
2658 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c, 2610 0x040fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cc000, 0xf3cf3cf3, 0x0020cf3c,
2659 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2611 0xcdcdcdcd, 0xfffffff5, 0x300fffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf300,
2660 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2612 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2661 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff, 2613 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c, 0xcdcdcdcd, 0xffffffff,
2662 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c, 2614 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0001cf3c,
@@ -2678,16 +2630,27 @@ static const u32 init_data_e1[] = {
2678 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 2630 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c,
2679 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 2631 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2680 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 2632 0xf3cf3cf3, 0x0020cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2681 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 2633 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0xffffffff,
2682 0x00070100, 0x00028170, 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 2634 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0000cf3c,
2683 0x00010370, 0x00080000, 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 2635 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2684 0x00010200, 0x00070210, 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 2636 0xf3cf3cf3, 0x0001cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2685 0x000b8198, 0x00020250, 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 2637 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0002cf3c, 0xcdcdcdcd, 0xffffffff,
2686 0x00080100, 0x00028180, 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 2638 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0004cf3c,
2687 0x00080380, 0x00028000, 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 2639 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2688 0x00000118, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 2640 0xf3cf3cf3, 0x0008cf3c, 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c,
2689 0xcccccccc, 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 2641 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0010cf3c, 0xcdcdcdcd, 0xffffffff,
2690 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000 2642 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc, 0xf3cf3cf3, 0x0020cf3c,
2643 0xcdcdcdcd, 0xffffffff, 0x30cfffff, 0x0c30c30c, 0xc30c30c3, 0xcf3cf3cc,
2644 0xf3cf3cf3, 0x0040cf3c, 0xcdcdcdcd, 0x00100000, 0x00070100, 0x00028170,
2645 0x000b8198, 0x00020250, 0x00010270, 0x000f0280, 0x00010370, 0x00080000,
2646 0x00080080, 0x00028100, 0x000b8128, 0x000201e0, 0x00010200, 0x00070210,
2647 0x00020280, 0x000f0000, 0x000800f0, 0x00028170, 0x000b8198, 0x00020250,
2648 0x00010270, 0x000b8280, 0x00080338, 0x00100000, 0x00080100, 0x00028180,
2649 0x000b81a8, 0x00020260, 0x00018280, 0x000e8298, 0x00080380, 0x00028000,
2650 0x000b8028, 0x000200e0, 0x00010100, 0x00008110, 0x00000118, 0xcccccccc,
2651 0xcccccccc, 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc,
2652 0xcccccccc, 0xcccccccc, 0x00002000, 0xcccccccc, 0xcccccccc, 0xcccccccc,
2653 0xcccccccc, 0x00002000
2691}; 2654};
2692 2655
2693static const u32 init_data_e1h[] = { 2656static const u32 init_data_e1h[] = {
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index ff2743db10d9..8b92c6ad0759 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -31,17 +31,16 @@
31 31
32/********************************************************/ 32/********************************************************/
33#define SUPPORT_CL73 0 /* Currently no */ 33#define SUPPORT_CL73 0 /* Currently no */
34#define ETH_HLEN 14 34#define ETH_HLEN 14
35#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/ 35#define ETH_OVREHEAD (ETH_HLEN + 8)/* 8 for CRC + VLAN*/
36#define ETH_MIN_PACKET_SIZE 60 36#define ETH_MIN_PACKET_SIZE 60
37#define ETH_MAX_PACKET_SIZE 1500 37#define ETH_MAX_PACKET_SIZE 1500
38#define ETH_MAX_JUMBO_PACKET_SIZE 9600 38#define ETH_MAX_JUMBO_PACKET_SIZE 9600
39#define MDIO_ACCESS_TIMEOUT 1000 39#define MDIO_ACCESS_TIMEOUT 1000
40#define BMAC_CONTROL_RX_ENABLE 2 40#define BMAC_CONTROL_RX_ENABLE 2
41#define MAX_MTU_SIZE 5000
42 41
43/***********************************************************/ 42/***********************************************************/
44/* Shortcut definitions */ 43/* Shortcut definitions */
45/***********************************************************/ 44/***********************************************************/
46 45
47#define NIG_STATUS_XGXS0_LINK10G \ 46#define NIG_STATUS_XGXS0_LINK10G \
@@ -80,12 +79,12 @@
80 79
81#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 80#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
82#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 81#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
83#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM 82#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
84#define AUTONEG_PARALLEL \ 83#define AUTONEG_PARALLEL \
85 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION 84 SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
86#define AUTONEG_SGMII_FIBER_AUTODET \ 85#define AUTONEG_SGMII_FIBER_AUTODET \
87 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT 86 SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
88#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY 87#define AUTONEG_REMOTE_PHY SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
89 88
90#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \ 89#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
91 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE 90 MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
@@ -202,11 +201,10 @@ static void bnx2x_emac_init(struct link_params *params,
202 /* init emac - use read-modify-write */ 201 /* init emac - use read-modify-write */
203 /* self clear reset */ 202 /* self clear reset */
204 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 203 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
205 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET)); 204 EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
206 205
207 timeout = 200; 206 timeout = 200;
208 do 207 do {
209 {
210 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 208 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
211 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val); 209 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
212 if (!timeout) { 210 if (!timeout) {
@@ -214,18 +212,18 @@ static void bnx2x_emac_init(struct link_params *params,
214 return; 212 return;
215 } 213 }
216 timeout--; 214 timeout--;
217 }while (val & EMAC_MODE_RESET); 215 } while (val & EMAC_MODE_RESET);
218 216
219 /* Set mac address */ 217 /* Set mac address */
220 val = ((params->mac_addr[0] << 8) | 218 val = ((params->mac_addr[0] << 8) |
221 params->mac_addr[1]); 219 params->mac_addr[1]);
222 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val); 220 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
223 221
224 val = ((params->mac_addr[2] << 24) | 222 val = ((params->mac_addr[2] << 24) |
225 (params->mac_addr[3] << 16) | 223 (params->mac_addr[3] << 16) |
226 (params->mac_addr[4] << 8) | 224 (params->mac_addr[4] << 8) |
227 params->mac_addr[5]); 225 params->mac_addr[5]);
228 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); 226 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
229} 227}
230 228
231static u8 bnx2x_emac_enable(struct link_params *params, 229static u8 bnx2x_emac_enable(struct link_params *params,
@@ -286,7 +284,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
286 if (CHIP_REV_IS_SLOW(bp)) { 284 if (CHIP_REV_IS_SLOW(bp)) {
287 /* config GMII mode */ 285 /* config GMII mode */
288 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 286 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
289 EMAC_WR(EMAC_REG_EMAC_MODE, 287 EMAC_WR(bp, EMAC_REG_EMAC_MODE,
290 (val | EMAC_MODE_PORT_GMII)); 288 (val | EMAC_MODE_PORT_GMII));
291 } else { /* ASIC */ 289 } else { /* ASIC */
292 /* pause enable/disable */ 290 /* pause enable/disable */
@@ -298,17 +296,19 @@ static u8 bnx2x_emac_enable(struct link_params *params,
298 EMAC_RX_MODE_FLOW_EN); 296 EMAC_RX_MODE_FLOW_EN);
299 297
300 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 298 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
301 EMAC_TX_MODE_EXT_PAUSE_EN); 299 (EMAC_TX_MODE_EXT_PAUSE_EN |
300 EMAC_TX_MODE_FLOW_EN));
302 if (vars->flow_ctrl & FLOW_CTRL_TX) 301 if (vars->flow_ctrl & FLOW_CTRL_TX)
303 bnx2x_bits_en(bp, emac_base + 302 bnx2x_bits_en(bp, emac_base +
304 EMAC_REG_EMAC_TX_MODE, 303 EMAC_REG_EMAC_TX_MODE,
305 EMAC_TX_MODE_EXT_PAUSE_EN); 304 (EMAC_TX_MODE_EXT_PAUSE_EN |
305 EMAC_TX_MODE_FLOW_EN));
306 } 306 }
307 307
308 /* KEEP_VLAN_TAG, promiscuous */ 308 /* KEEP_VLAN_TAG, promiscuous */
309 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 309 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
310 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 310 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
311 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); 311 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
312 312
313 /* Set Loopback */ 313 /* Set Loopback */
314 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); 314 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
@@ -316,10 +316,10 @@ static u8 bnx2x_emac_enable(struct link_params *params,
316 val |= 0x810; 316 val |= 0x810;
317 else 317 else
318 val &= ~0x810; 318 val &= ~0x810;
319 EMAC_WR(EMAC_REG_EMAC_MODE, val); 319 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
320 320
321 /* enable emac for jumbo packets */ 321 /* enable emac for jumbo packets */
322 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE, 322 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
323 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 323 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
324 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); 324 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
325 325
@@ -591,9 +591,9 @@ void bnx2x_link_status_update(struct link_params *params,
591 vars->flow_ctrl &= ~FLOW_CTRL_RX; 591 vars->flow_ctrl &= ~FLOW_CTRL_RX;
592 592
593 if (vars->phy_flags & PHY_XGXS_FLAG) { 593 if (vars->phy_flags & PHY_XGXS_FLAG) {
594 if (params->req_line_speed && 594 if (vars->line_speed &&
595 ((params->req_line_speed == SPEED_10) || 595 ((vars->line_speed == SPEED_10) ||
596 (params->req_line_speed == SPEED_100))) { 596 (vars->line_speed == SPEED_100))) {
597 vars->phy_flags |= PHY_SGMII_FLAG; 597 vars->phy_flags |= PHY_SGMII_FLAG;
598 } else { 598 } else {
599 vars->phy_flags &= ~PHY_SGMII_FLAG; 599 vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -645,7 +645,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
645 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : 645 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
646 NIG_REG_INGRESS_BMAC0_MEM; 646 NIG_REG_INGRESS_BMAC0_MEM;
647 u32 wb_data[2]; 647 u32 wb_data[2];
648 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); 648 u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
649 649
650 /* Only if the bmac is out of reset */ 650 /* Only if the bmac is out of reset */
651 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 651 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -670,7 +670,6 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
670 u8 port = params->port; 670 u8 port = params->port;
671 u32 init_crd, crd; 671 u32 init_crd, crd;
672 u32 count = 1000; 672 u32 count = 1000;
673 u32 pause = 0;
674 673
675 /* disable port */ 674 /* disable port */
676 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); 675 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
@@ -693,33 +692,25 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
693 return -EINVAL; 692 return -EINVAL;
694 } 693 }
695 694
696 if (flow_ctrl & FLOW_CTRL_RX) 695 if (flow_ctrl & FLOW_CTRL_RX ||
697 pause = 1; 696 line_speed == SPEED_10 ||
698 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause); 697 line_speed == SPEED_100 ||
699 if (pause) { 698 line_speed == SPEED_1000 ||
699 line_speed == SPEED_2500) {
700 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
700 /* update threshold */ 701 /* update threshold */
701 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); 702 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
702 /* update init credit */ 703 /* update init credit */
703 init_crd = 778; /* (800-18-4) */ 704 init_crd = 778; /* (800-18-4) */
704 705
705 } else { 706 } else {
706 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + 707 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
707 ETH_OVREHEAD)/16; 708 ETH_OVREHEAD)/16;
708 709 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
709 /* update threshold */ 710 /* update threshold */
710 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); 711 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
711 /* update init credit */ 712 /* update init credit */
712 switch (line_speed) { 713 switch (line_speed) {
713 case SPEED_10:
714 case SPEED_100:
715 case SPEED_1000:
716 init_crd = thresh + 55 - 22;
717 break;
718
719 case SPEED_2500:
720 init_crd = thresh + 138 - 22;
721 break;
722
723 case SPEED_10000: 714 case SPEED_10000:
724 init_crd = thresh + 553 - 22; 715 init_crd = thresh + 553 - 22;
725 break; 716 break;
@@ -764,10 +755,10 @@ static u32 bnx2x_get_emac_base(u32 ext_phy_type, u8 port)
764 emac_base = GRCBASE_EMAC0; 755 emac_base = GRCBASE_EMAC0;
765 break; 756 break;
766 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 757 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
767 emac_base = (port) ? GRCBASE_EMAC0: GRCBASE_EMAC1; 758 emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
768 break; 759 break;
769 default: 760 default:
770 emac_base = (port) ? GRCBASE_EMAC1: GRCBASE_EMAC0; 761 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
771 break; 762 break;
772 } 763 }
773 return emac_base; 764 return emac_base;
@@ -1044,7 +1035,7 @@ static void bnx2x_set_swap_lanes(struct link_params *params)
1044} 1035}
1045 1036
1046static void bnx2x_set_parallel_detection(struct link_params *params, 1037static void bnx2x_set_parallel_detection(struct link_params *params,
1047 u8 phy_flags) 1038 u8 phy_flags)
1048{ 1039{
1049 struct bnx2x *bp = params->bp; 1040 struct bnx2x *bp = params->bp;
1050 u16 control2; 1041 u16 control2;
@@ -1114,7 +1105,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1114 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); 1105 MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
1115 1106
1116 /* CL37 Autoneg Enabled */ 1107 /* CL37 Autoneg Enabled */
1117 if (params->req_line_speed == SPEED_AUTO_NEG) 1108 if (vars->line_speed == SPEED_AUTO_NEG)
1118 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN; 1109 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
1119 else /* CL37 Autoneg Disabled */ 1110 else /* CL37 Autoneg Disabled */
1120 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 1111 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
@@ -1132,7 +1123,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1132 MDIO_REG_BANK_SERDES_DIGITAL, 1123 MDIO_REG_BANK_SERDES_DIGITAL,
1133 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val); 1124 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
1134 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN; 1125 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
1135 if (params->req_line_speed == SPEED_AUTO_NEG) 1126 if (vars->line_speed == SPEED_AUTO_NEG)
1136 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1127 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
1137 else 1128 else
1138 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; 1129 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
@@ -1148,7 +1139,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1148 MDIO_REG_BANK_BAM_NEXT_PAGE, 1139 MDIO_REG_BANK_BAM_NEXT_PAGE,
1149 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, 1140 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
1150 &reg_val); 1141 &reg_val);
1151 if (params->req_line_speed == SPEED_AUTO_NEG) { 1142 if (vars->line_speed == SPEED_AUTO_NEG) {
1152 /* Enable BAM aneg Mode and TetonII aneg Mode */ 1143 /* Enable BAM aneg Mode and TetonII aneg Mode */
1153 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | 1144 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
1154 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); 1145 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
@@ -1164,7 +1155,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1164 reg_val); 1155 reg_val);
1165 1156
1166 /* Enable Clause 73 Aneg */ 1157 /* Enable Clause 73 Aneg */
1167 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1158 if ((vars->line_speed == SPEED_AUTO_NEG) &&
1168 (SUPPORT_CL73)) { 1159 (SUPPORT_CL73)) {
1169 /* Enable BAM Station Manager */ 1160 /* Enable BAM Station Manager */
1170 1161
@@ -1226,7 +1217,8 @@ static void bnx2x_set_autoneg(struct link_params *params,
1226} 1217}
1227 1218
1228/* program SerDes, forced speed */ 1219/* program SerDes, forced speed */
1229static void bnx2x_program_serdes(struct link_params *params) 1220static void bnx2x_program_serdes(struct link_params *params,
1221 struct link_vars *vars)
1230{ 1222{
1231 struct bnx2x *bp = params->bp; 1223 struct bnx2x *bp = params->bp;
1232 u16 reg_val; 1224 u16 reg_val;
@@ -1248,28 +1240,35 @@ static void bnx2x_program_serdes(struct link_params *params)
1248 1240
1249 /* program speed 1241 /* program speed
1250 - needed only if the speed is greater than 1G (2.5G or 10G) */ 1242 - needed only if the speed is greater than 1G (2.5G or 10G) */
1251 if (!((params->req_line_speed == SPEED_1000) || 1243 CL45_RD_OVER_CL22(bp, params->port,
1252 (params->req_line_speed == SPEED_100) ||
1253 (params->req_line_speed == SPEED_10))) {
1254 CL45_RD_OVER_CL22(bp, params->port,
1255 params->phy_addr, 1244 params->phy_addr,
1256 MDIO_REG_BANK_SERDES_DIGITAL, 1245 MDIO_REG_BANK_SERDES_DIGITAL,
1257 MDIO_SERDES_DIGITAL_MISC1, &reg_val); 1246 MDIO_SERDES_DIGITAL_MISC1, &reg_val);
1258 /* clearing the speed value before setting the right speed */ 1247 /* clearing the speed value before setting the right speed */
1259 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK; 1248 DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
1249
1250 reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
1251 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1252
1253 if (!((vars->line_speed == SPEED_1000) ||
1254 (vars->line_speed == SPEED_100) ||
1255 (vars->line_speed == SPEED_10))) {
1256
1260 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M | 1257 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
1261 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL); 1258 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
1262 if (params->req_line_speed == SPEED_10000) 1259 if (vars->line_speed == SPEED_10000)
1263 reg_val |= 1260 reg_val |=
1264 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4; 1261 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
1265 if (params->req_line_speed == SPEED_13000) 1262 if (vars->line_speed == SPEED_13000)
1266 reg_val |= 1263 reg_val |=
1267 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; 1264 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
1268 CL45_WR_OVER_CL22(bp, params->port, 1265 }
1266
1267 CL45_WR_OVER_CL22(bp, params->port,
1269 params->phy_addr, 1268 params->phy_addr,
1270 MDIO_REG_BANK_SERDES_DIGITAL, 1269 MDIO_REG_BANK_SERDES_DIGITAL,
1271 MDIO_SERDES_DIGITAL_MISC1, reg_val); 1270 MDIO_SERDES_DIGITAL_MISC1, reg_val);
1272 } 1271
1273} 1272}
1274 1273
1275static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params) 1274static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
@@ -1295,48 +1294,49 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1295 MDIO_OVER_1G_UP3, 0); 1294 MDIO_OVER_1G_UP3, 0);
1296} 1295}
1297 1296
1298static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1297static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
1299 u32 *ieee_fc)
1300{ 1298{
1301 struct bnx2x *bp = params->bp; 1299 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1302 /* for AN, we are always publishing full duplex */
1303 u16 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1304
1305 /* resolve pause mode and advertisement 1300 /* resolve pause mode and advertisement
1306 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1301 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1307 1302
1308 switch (params->req_flow_ctrl) { 1303 switch (params->req_flow_ctrl) {
1309 case FLOW_CTRL_AUTO: 1304 case FLOW_CTRL_AUTO:
1310 if (params->mtu <= MAX_MTU_SIZE) { 1305 if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) {
1311 an_adv |= 1306 *ieee_fc |=
1312 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1307 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1313 } else { 1308 } else {
1314 an_adv |= 1309 *ieee_fc |=
1315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1310 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1316 } 1311 }
1317 break; 1312 break;
1318 case FLOW_CTRL_TX: 1313 case FLOW_CTRL_TX:
1319 an_adv |= 1314 *ieee_fc |=
1320 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1321 break; 1316 break;
1322 1317
1323 case FLOW_CTRL_RX: 1318 case FLOW_CTRL_RX:
1324 case FLOW_CTRL_BOTH: 1319 case FLOW_CTRL_BOTH:
1325 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1320 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1326 break; 1321 break;
1327 1322
1328 case FLOW_CTRL_NONE: 1323 case FLOW_CTRL_NONE:
1329 default: 1324 default:
1330 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1325 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1331 break; 1326 break;
1332 } 1327 }
1328}
1333 1329
1334 *ieee_fc = an_adv; 1330static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1331 u32 ieee_fc)
1332{
1333 struct bnx2x *bp = params->bp;
1334 /* for AN, we are always publishing full duplex */
1335 1335
1336 CL45_WR_OVER_CL22(bp, params->port, 1336 CL45_WR_OVER_CL22(bp, params->port,
1337 params->phy_addr, 1337 params->phy_addr,
1338 MDIO_REG_BANK_COMBO_IEEE0, 1338 MDIO_REG_BANK_COMBO_IEEE0,
1339 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv); 1339 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, (u16)ieee_fc);
1340} 1340}
1341 1341
1342static void bnx2x_restart_autoneg(struct link_params *params) 1342static void bnx2x_restart_autoneg(struct link_params *params)
@@ -1382,7 +1382,8 @@ static void bnx2x_restart_autoneg(struct link_params *params)
1382 } 1382 }
1383} 1383}
1384 1384
1385static void bnx2x_initialize_sgmii_process(struct link_params *params) 1385static void bnx2x_initialize_sgmii_process(struct link_params *params,
1386 struct link_vars *vars)
1386{ 1387{
1387 struct bnx2x *bp = params->bp; 1388 struct bnx2x *bp = params->bp;
1388 u16 control1; 1389 u16 control1;
@@ -1406,7 +1407,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1406 control1); 1407 control1);
1407 1408
1408 /* if forced speed */ 1409 /* if forced speed */
1409 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 1410 if (!(vars->line_speed == SPEED_AUTO_NEG)) {
1410 /* set speed, disable autoneg */ 1411 /* set speed, disable autoneg */
1411 u16 mii_control; 1412 u16 mii_control;
1412 1413
@@ -1419,7 +1420,7 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1419 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| 1420 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
1420 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); 1421 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
1421 1422
1422 switch (params->req_line_speed) { 1423 switch (vars->line_speed) {
1423 case SPEED_100: 1424 case SPEED_100:
1424 mii_control |= 1425 mii_control |=
1425 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100; 1426 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
@@ -1433,8 +1434,8 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1433 break; 1434 break;
1434 default: 1435 default:
1435 /* invalid speed for SGMII */ 1436 /* invalid speed for SGMII */
1436 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n", 1437 DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
1437 params->req_line_speed); 1438 vars->line_speed);
1438 break; 1439 break;
1439 } 1440 }
1440 1441
@@ -1460,20 +1461,20 @@ static void bnx2x_initialize_sgmii_process(struct link_params *params)
1460 */ 1461 */
1461 1462
1462static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 1463static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1463{ 1464{ /* LD LP */
1464 switch (pause_result) { /* ASYM P ASYM P */ 1465 switch (pause_result) { /* ASYM P ASYM P */
1465 case 0xb: /* 1 0 1 1 */ 1466 case 0xb: /* 1 0 1 1 */
1466 vars->flow_ctrl = FLOW_CTRL_TX; 1467 vars->flow_ctrl = FLOW_CTRL_TX;
1467 break; 1468 break;
1468 1469
1469 case 0xe: /* 1 1 1 0 */ 1470 case 0xe: /* 1 1 1 0 */
1470 vars->flow_ctrl = FLOW_CTRL_RX; 1471 vars->flow_ctrl = FLOW_CTRL_RX;
1471 break; 1472 break;
1472 1473
1473 case 0x5: /* 0 1 0 1 */ 1474 case 0x5: /* 0 1 0 1 */
1474 case 0x7: /* 0 1 1 1 */ 1475 case 0x7: /* 0 1 1 1 */
1475 case 0xd: /* 1 1 0 1 */ 1476 case 0xd: /* 1 1 0 1 */
1476 case 0xf: /* 1 1 1 1 */ 1477 case 0xf: /* 1 1 1 1 */
1477 vars->flow_ctrl = FLOW_CTRL_BOTH; 1478 vars->flow_ctrl = FLOW_CTRL_BOTH;
1478 break; 1479 break;
1479 1480
@@ -1531,6 +1532,28 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1532 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1532 pause_result); 1533 pause_result);
1533 bnx2x_pause_resolve(vars, pause_result); 1534 bnx2x_pause_resolve(vars, pause_result);
1535 if (vars->flow_ctrl == FLOW_CTRL_NONE &&
1536 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1537 bnx2x_cl45_read(bp, port,
1538 ext_phy_type,
1539 ext_phy_addr,
1540 MDIO_AN_DEVAD,
1541 MDIO_AN_REG_CL37_FC_LD, &ld_pause);
1542
1543 bnx2x_cl45_read(bp, port,
1544 ext_phy_type,
1545 ext_phy_addr,
1546 MDIO_AN_DEVAD,
1547 MDIO_AN_REG_CL37_FC_LP, &lp_pause);
1548 pause_result = (ld_pause &
1549 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
1550 pause_result |= (lp_pause &
1551 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1552
1553 bnx2x_pause_resolve(vars, pause_result);
1554 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n",
1555 pause_result);
1556 }
1534 } 1557 }
1535 return ret; 1558 return ret;
1536} 1559}
@@ -1541,8 +1564,8 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1541 u32 gp_status) 1564 u32 gp_status)
1542{ 1565{
1543 struct bnx2x *bp = params->bp; 1566 struct bnx2x *bp = params->bp;
1544 u16 ld_pause; /* local driver */ 1567 u16 ld_pause; /* local driver */
1545 u16 lp_pause; /* link partner */ 1568 u16 lp_pause; /* link partner */
1546 u16 pause_result; 1569 u16 pause_result;
1547 1570
1548 vars->flow_ctrl = FLOW_CTRL_NONE; 1571 vars->flow_ctrl = FLOW_CTRL_NONE;
@@ -1573,13 +1596,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1573 (bnx2x_ext_phy_resove_fc(params, vars))) { 1596 (bnx2x_ext_phy_resove_fc(params, vars))) {
1574 return; 1597 return;
1575 } else { 1598 } else {
1576 vars->flow_ctrl = params->req_flow_ctrl; 1599 if (params->req_flow_ctrl == FLOW_CTRL_AUTO)
1577 if (vars->flow_ctrl == FLOW_CTRL_AUTO) { 1600 vars->flow_ctrl = params->req_fc_auto_adv;
1578 if (params->mtu <= MAX_MTU_SIZE) 1601 else
1579 vars->flow_ctrl = FLOW_CTRL_BOTH; 1602 vars->flow_ctrl = params->req_flow_ctrl;
1580 else
1581 vars->flow_ctrl = FLOW_CTRL_TX;
1582 }
1583 } 1603 }
1584 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl); 1604 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
1585} 1605}
@@ -1590,6 +1610,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1590 u32 gp_status) 1610 u32 gp_status)
1591{ 1611{
1592 struct bnx2x *bp = params->bp; 1612 struct bnx2x *bp = params->bp;
1613
1593 u8 rc = 0; 1614 u8 rc = 0;
1594 vars->link_status = 0; 1615 vars->link_status = 0;
1595 1616
@@ -1690,7 +1711,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1690 1711
1691 vars->link_status |= LINK_STATUS_SERDES_LINK; 1712 vars->link_status |= LINK_STATUS_SERDES_LINK;
1692 1713
1693 if (params->req_line_speed == SPEED_AUTO_NEG) { 1714 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
1715 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1716 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
1717 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1718 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705))) {
1694 vars->autoneg = AUTO_NEG_ENABLED; 1719 vars->autoneg = AUTO_NEG_ENABLED;
1695 1720
1696 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 1721 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
@@ -1705,18 +1730,18 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1705 1730
1706 } 1731 }
1707 if (vars->flow_ctrl & FLOW_CTRL_TX) 1732 if (vars->flow_ctrl & FLOW_CTRL_TX)
1708 vars->link_status |= 1733 vars->link_status |=
1709 LINK_STATUS_TX_FLOW_CONTROL_ENABLED; 1734 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1710 1735
1711 if (vars->flow_ctrl & FLOW_CTRL_RX) 1736 if (vars->flow_ctrl & FLOW_CTRL_RX)
1712 vars->link_status |= 1737 vars->link_status |=
1713 LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1738 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1714 1739
1715 } else { /* link_down */ 1740 } else { /* link_down */
1716 DP(NETIF_MSG_LINK, "phy link down\n"); 1741 DP(NETIF_MSG_LINK, "phy link down\n");
1717 1742
1718 vars->phy_link_up = 0; 1743 vars->phy_link_up = 0;
1719 vars->line_speed = 0; 1744
1720 vars->duplex = DUPLEX_FULL; 1745 vars->duplex = DUPLEX_FULL;
1721 vars->flow_ctrl = FLOW_CTRL_NONE; 1746 vars->flow_ctrl = FLOW_CTRL_NONE;
1722 vars->autoneg = AUTO_NEG_DISABLED; 1747 vars->autoneg = AUTO_NEG_DISABLED;
@@ -1817,15 +1842,15 @@ static u8 bnx2x_emac_program(struct link_params *params,
1817} 1842}
1818 1843
1819/*****************************************************************************/ 1844/*****************************************************************************/
1820/* External Phy section */ 1845/* External Phy section */
1821/*****************************************************************************/ 1846/*****************************************************************************/
1822static void bnx2x_hw_reset(struct bnx2x *bp) 1847static void bnx2x_hw_reset(struct bnx2x *bp, u8 port)
1823{ 1848{
1824 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1849 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1825 MISC_REGISTERS_GPIO_OUTPUT_LOW); 1850 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1826 msleep(1); 1851 msleep(1);
1827 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1852 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1828 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1853 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
1829} 1854}
1830 1855
1831static void bnx2x_ext_phy_reset(struct link_params *params, 1856static void bnx2x_ext_phy_reset(struct link_params *params,
@@ -1854,10 +1879,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1854 1879
1855 /* Restore normal power mode*/ 1880 /* Restore normal power mode*/
1856 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1881 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1857 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1882 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1883 params->port);
1858 1884
1859 /* HW reset */ 1885 /* HW reset */
1860 bnx2x_hw_reset(bp); 1886 bnx2x_hw_reset(bp, params->port);
1861 1887
1862 bnx2x_cl45_write(bp, params->port, 1888 bnx2x_cl45_write(bp, params->port,
1863 ext_phy_type, 1889 ext_phy_type,
@@ -1869,7 +1895,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1869 /* Unset Low Power Mode and SW reset */ 1895 /* Unset Low Power Mode and SW reset */
1870 /* Restore normal power mode*/ 1896 /* Restore normal power mode*/
1871 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1897 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1872 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1898 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1899 params->port);
1873 1900
1874 DP(NETIF_MSG_LINK, "XGXS 8072\n"); 1901 DP(NETIF_MSG_LINK, "XGXS 8072\n");
1875 bnx2x_cl45_write(bp, params->port, 1902 bnx2x_cl45_write(bp, params->port,
@@ -1887,19 +1914,14 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1887 1914
1888 /* Restore normal power mode*/ 1915 /* Restore normal power mode*/
1889 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1916 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1890 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1917 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1918 params->port);
1891 1919
1892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 1920 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1893 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1921 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1922 params->port);
1894 1923
1895 DP(NETIF_MSG_LINK, "XGXS 8073\n"); 1924 DP(NETIF_MSG_LINK, "XGXS 8073\n");
1896 bnx2x_cl45_write(bp,
1897 params->port,
1898 ext_phy_type,
1899 ext_phy_addr,
1900 MDIO_PMA_DEVAD,
1901 MDIO_PMA_REG_CTRL,
1902 1<<15);
1903 } 1925 }
1904 break; 1926 break;
1905 1927
@@ -1908,10 +1930,11 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1908 1930
1909 /* Restore normal power mode*/ 1931 /* Restore normal power mode*/
1910 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 1932 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1911 MISC_REGISTERS_GPIO_OUTPUT_HIGH); 1933 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
1934 params->port);
1912 1935
1913 /* HW reset */ 1936 /* HW reset */
1914 bnx2x_hw_reset(bp); 1937 bnx2x_hw_reset(bp, params->port);
1915 1938
1916 break; 1939 break;
1917 1940
@@ -1934,7 +1957,7 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
1934 1957
1935 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: 1958 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1936 DP(NETIF_MSG_LINK, "SerDes 5482\n"); 1959 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1937 bnx2x_hw_reset(bp); 1960 bnx2x_hw_reset(bp, params->port);
1938 break; 1961 break;
1939 1962
1940 default: 1963 default:
@@ -2098,42 +2121,45 @@ static u8 bnx2x_bcm8073_xaui_wa(struct link_params *params)
2098 2121
2099} 2122}
2100 2123
2101static void bnx2x_bcm8073_external_rom_boot(struct link_params *params) 2124static void bnx2x_bcm8073_external_rom_boot(struct bnx2x *bp, u8 port,
2125 u8 ext_phy_addr)
2102{ 2126{
2103 struct bnx2x *bp = params->bp; 2127 u16 fw_ver1, fw_ver2;
2104 u8 port = params->port; 2128 /* Boot port from external ROM */
2105 u8 ext_phy_addr = ((params->ext_phy_config &
2106 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2107 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2108 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2109 u16 fw_ver1, fw_ver2, val;
2110 /* Need to wait 100ms after reset */
2111 msleep(100);
2112 /* Boot port from external ROM */
2113 /* EDC grst */ 2129 /* EDC grst */
2114 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2130 bnx2x_cl45_write(bp, port,
2131 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2132 ext_phy_addr,
2115 MDIO_PMA_DEVAD, 2133 MDIO_PMA_DEVAD,
2116 MDIO_PMA_REG_GEN_CTRL, 2134 MDIO_PMA_REG_GEN_CTRL,
2117 0x0001); 2135 0x0001);
2118 2136
2119 /* ucode reboot and rst */ 2137 /* ucode reboot and rst */
2120 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2138 bnx2x_cl45_write(bp, port,
2139 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2140 ext_phy_addr,
2121 MDIO_PMA_DEVAD, 2141 MDIO_PMA_DEVAD,
2122 MDIO_PMA_REG_GEN_CTRL, 2142 MDIO_PMA_REG_GEN_CTRL,
2123 0x008c); 2143 0x008c);
2124 2144
2125 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2145 bnx2x_cl45_write(bp, port,
2146 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2147 ext_phy_addr,
2126 MDIO_PMA_DEVAD, 2148 MDIO_PMA_DEVAD,
2127 MDIO_PMA_REG_MISC_CTRL1, 0x0001); 2149 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2128 2150
2129 /* Reset internal microprocessor */ 2151 /* Reset internal microprocessor */
2130 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2152 bnx2x_cl45_write(bp, port,
2153 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2154 ext_phy_addr,
2131 MDIO_PMA_DEVAD, 2155 MDIO_PMA_DEVAD,
2132 MDIO_PMA_REG_GEN_CTRL, 2156 MDIO_PMA_REG_GEN_CTRL,
2133 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2157 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2134 2158
2135 /* Release srst bit */ 2159 /* Release srst bit */
2136 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2160 bnx2x_cl45_write(bp, port,
2161 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2162 ext_phy_addr,
2137 MDIO_PMA_DEVAD, 2163 MDIO_PMA_DEVAD,
2138 MDIO_PMA_REG_GEN_CTRL, 2164 MDIO_PMA_REG_GEN_CTRL,
2139 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2165 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
@@ -2142,35 +2168,52 @@ static void bnx2x_bcm8073_external_rom_boot(struct link_params *params)
2142 msleep(100); 2168 msleep(100);
2143 2169
2144 /* Clear ser_boot_ctl bit */ 2170 /* Clear ser_boot_ctl bit */
2145 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2171 bnx2x_cl45_write(bp, port,
2172 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2173 ext_phy_addr,
2146 MDIO_PMA_DEVAD, 2174 MDIO_PMA_DEVAD,
2147 MDIO_PMA_REG_MISC_CTRL1, 0x0000); 2175 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
2148 2176
2149 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2177 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2150 MDIO_PMA_DEVAD, 2178 ext_phy_addr,
2151 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 2179 MDIO_PMA_DEVAD,
2152 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr, 2180 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
2153 MDIO_PMA_DEVAD, 2181 bnx2x_cl45_read(bp, port,
2154 MDIO_PMA_REG_ROM_VER2, &fw_ver2); 2182 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
2183 ext_phy_addr,
2184 MDIO_PMA_DEVAD,
2185 MDIO_PMA_REG_ROM_VER2, &fw_ver2);
2155 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); 2186 DP(NETIF_MSG_LINK, "8073 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
2156 2187
2157 /* Only set bit 10 = 1 (Tx power down) */ 2188}
2158 bnx2x_cl45_read(bp, port, ext_phy_type, ext_phy_addr,
2159 MDIO_PMA_DEVAD,
2160 MDIO_PMA_REG_TX_POWER_DOWN, &val);
2161 2189
2190static void bnx2x_bcm807x_force_10G(struct link_params *params)
2191{
2192 struct bnx2x *bp = params->bp;
2193 u8 port = params->port;
2194 u8 ext_phy_addr = ((params->ext_phy_config &
2195 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2196 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2197 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2198
2199 /* Force KR or KX */
2162 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2200 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2163 MDIO_PMA_DEVAD, 2201 MDIO_PMA_DEVAD,
2164 MDIO_PMA_REG_TX_POWER_DOWN, (val | 1<<10)); 2202 MDIO_PMA_REG_CTRL,
2165 2203 0x2040);
2166 msleep(600);
2167 /* Release bit 10 (Release Tx power down) */
2168 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2204 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2169 MDIO_PMA_DEVAD, 2205 MDIO_PMA_DEVAD,
2170 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); 2206 MDIO_PMA_REG_10G_CTRL2,
2171 2207 0x000b);
2208 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2209 MDIO_PMA_DEVAD,
2210 MDIO_PMA_REG_BCM_CTRL,
2211 0x0000);
2212 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2213 MDIO_AN_DEVAD,
2214 MDIO_AN_REG_CTRL,
2215 0x0000);
2172} 2216}
2173
2174static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params) 2217static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2175{ 2218{
2176 struct bnx2x *bp = params->bp; 2219 struct bnx2x *bp = params->bp;
@@ -2236,32 +2279,51 @@ static void bnx2x_bcm8073_set_xaui_low_power_mode(struct link_params *params)
2236 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2279 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2237 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val); 2280 MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
2238} 2281}
2239static void bnx2x_bcm807x_force_10G(struct link_params *params) 2282
2283static void bnx2x_8073_set_pause_cl37(struct link_params *params,
2284 struct link_vars *vars)
2240{ 2285{
2286
2241 struct bnx2x *bp = params->bp; 2287 struct bnx2x *bp = params->bp;
2242 u8 port = params->port; 2288 u16 cl37_val;
2243 u8 ext_phy_addr = ((params->ext_phy_config & 2289 u8 ext_phy_addr = ((params->ext_phy_config &
2244 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> 2290 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2245 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); 2291 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2246 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 2292 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
2247 2293
2248 /* Force KR or KX */ 2294 bnx2x_cl45_read(bp, params->port,
2249 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2295 ext_phy_type,
2250 MDIO_PMA_DEVAD, 2296 ext_phy_addr,
2251 MDIO_PMA_REG_CTRL, 2297 MDIO_AN_DEVAD,
2252 0x2040); 2298 MDIO_AN_REG_CL37_FC_LD, &cl37_val);
2253 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2299
2254 MDIO_PMA_DEVAD, 2300 cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2255 MDIO_PMA_REG_10G_CTRL2, 2301 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2256 0x000b); 2302
2257 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2303 if ((vars->ieee_fc &
2258 MDIO_PMA_DEVAD, 2304 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
2259 MDIO_PMA_REG_BCM_CTRL, 2305 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
2260 0x0000); 2306 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2261 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2307 }
2308 if ((vars->ieee_fc &
2309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2310 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2311 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2312 }
2313 if ((vars->ieee_fc &
2314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2315 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2316 cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2317 }
2318 DP(NETIF_MSG_LINK,
2319 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
2320
2321 bnx2x_cl45_write(bp, params->port,
2322 ext_phy_type,
2323 ext_phy_addr,
2262 MDIO_AN_DEVAD, 2324 MDIO_AN_DEVAD,
2263 MDIO_AN_REG_CTRL, 2325 MDIO_AN_REG_CL37_FC_LD, cl37_val);
2264 0x0000); 2326 msleep(500);
2265} 2327}
2266 2328
2267static void bnx2x_ext_phy_set_pause(struct link_params *params, 2329static void bnx2x_ext_phy_set_pause(struct link_params *params,
@@ -2282,13 +2344,16 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2282 MDIO_AN_REG_ADV_PAUSE, &val); 2344 MDIO_AN_REG_ADV_PAUSE, &val);
2283 2345
2284 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; 2346 val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
2347
2285 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ 2348 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
2286 2349
2287 if (vars->ieee_fc & 2350 if ((vars->ieee_fc &
2351 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
2288 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { 2352 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
2289 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; 2353 val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
2290 } 2354 }
2291 if (vars->ieee_fc & 2355 if ((vars->ieee_fc &
2356 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
2292 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) { 2357 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
2293 val |= 2358 val |=
2294 MDIO_AN_REG_ADV_PAUSE_PAUSE; 2359 MDIO_AN_REG_ADV_PAUSE_PAUSE;
@@ -2302,6 +2367,65 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
2302 MDIO_AN_REG_ADV_PAUSE, val); 2367 MDIO_AN_REG_ADV_PAUSE, val);
2303} 2368}
2304 2369
2370
2371static void bnx2x_init_internal_phy(struct link_params *params,
2372 struct link_vars *vars)
2373{
2374 struct bnx2x *bp = params->bp;
2375 u8 port = params->port;
2376 if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
2377 u16 bank, rx_eq;
2378
2379 rx_eq = ((params->serdes_config &
2380 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2381 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2382
2383 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq);
2384 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2385 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) {
2386 CL45_WR_OVER_CL22(bp, port,
2387 params->phy_addr,
2388 bank ,
2389 MDIO_RX0_RX_EQ_BOOST,
2390 ((rx_eq &
2391 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2392 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2393 }
2394
2395 /* forced speed requested? */
2396 if (vars->line_speed != SPEED_AUTO_NEG) {
2397 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2398
2399 /* disable autoneg */
2400 bnx2x_set_autoneg(params, vars);
2401
2402 /* program speed and duplex */
2403 bnx2x_program_serdes(params, vars);
2404
2405 } else { /* AN_mode */
2406 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2407
2408 /* AN enabled */
2409 bnx2x_set_brcm_cl37_advertisment(params);
2410
2411 /* program duplex & pause advertisement (for aneg) */
2412 bnx2x_set_ieee_aneg_advertisment(params,
2413 vars->ieee_fc);
2414
2415 /* enable autoneg */
2416 bnx2x_set_autoneg(params, vars);
2417
2418 /* enable and restart AN */
2419 bnx2x_restart_autoneg(params);
2420 }
2421
2422 } else { /* SGMII mode */
2423 DP(NETIF_MSG_LINK, "SGMII\n");
2424
2425 bnx2x_initialize_sgmii_process(params, vars);
2426 }
2427}
2428
2305static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars) 2429static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2306{ 2430{
2307 struct bnx2x *bp = params->bp; 2431 struct bnx2x *bp = params->bp;
@@ -2343,7 +2467,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2343 2467
2344 switch (ext_phy_type) { 2468 switch (ext_phy_type) {
2345 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 2469 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2346 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2347 break; 2470 break;
2348 2471
2349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: 2472 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
@@ -2419,7 +2542,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2419 ext_phy_type, 2542 ext_phy_type,
2420 ext_phy_addr, 2543 ext_phy_addr,
2421 MDIO_AN_DEVAD, 2544 MDIO_AN_DEVAD,
2422 MDIO_AN_REG_CL37_FD, 2545 MDIO_AN_REG_CL37_FC_LP,
2423 0x0020); 2546 0x0020);
2424 /* Enable CL37 AN */ 2547 /* Enable CL37 AN */
2425 bnx2x_cl45_write(bp, params->port, 2548 bnx2x_cl45_write(bp, params->port,
@@ -2458,54 +2581,43 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2458 rx_alarm_ctrl_val = 0x400; 2581 rx_alarm_ctrl_val = 0x400;
2459 lasi_ctrl_val = 0x0004; 2582 lasi_ctrl_val = 0x0004;
2460 } else { 2583 } else {
2461 /* In 8073, port1 is directed through emac0 and
2462 * port0 is directed through emac1
2463 */
2464 rx_alarm_ctrl_val = (1<<2); 2584 rx_alarm_ctrl_val = (1<<2);
2465 /*lasi_ctrl_val = 0x0005;*/
2466 lasi_ctrl_val = 0x0004; 2585 lasi_ctrl_val = 0x0004;
2467 } 2586 }
2468 2587
2469 /* Wait for soft reset to get cleared upto 1 sec */ 2588 /* enable LASI */
2470 for (cnt = 0; cnt < 1000; cnt++) { 2589 bnx2x_cl45_write(bp, params->port,
2471 bnx2x_cl45_read(bp, params->port, 2590 ext_phy_type,
2472 ext_phy_type, 2591 ext_phy_addr,
2473 ext_phy_addr, 2592 MDIO_PMA_DEVAD,
2474 MDIO_PMA_DEVAD, 2593 MDIO_PMA_REG_RX_ALARM_CTRL,
2475 MDIO_PMA_REG_CTRL, 2594 rx_alarm_ctrl_val);
2476 &ctrl); 2595
2477 if (!(ctrl & (1<<15))) 2596 bnx2x_cl45_write(bp, params->port,
2478 break; 2597 ext_phy_type,
2479 msleep(1); 2598 ext_phy_addr,
2480 } 2599 MDIO_PMA_DEVAD,
2481 DP(NETIF_MSG_LINK, 2600 MDIO_PMA_REG_LASI_CTRL,
2482 "807x control reg 0x%x (after %d ms)\n", 2601 lasi_ctrl_val);
2483 ctrl, cnt); 2602
2603 bnx2x_8073_set_pause_cl37(params, vars);
2484 2604
2485 if (ext_phy_type == 2605 if (ext_phy_type ==
2486 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){ 2606 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072){
2487 bnx2x_bcm8072_external_rom_boot(params); 2607 bnx2x_bcm8072_external_rom_boot(params);
2488 } else { 2608 } else {
2489 bnx2x_bcm8073_external_rom_boot(params); 2609
2490 /* In case of 8073 with long xaui lines, 2610 /* In case of 8073 with long xaui lines,
2491 don't set the 8073 xaui low power*/ 2611 don't set the 8073 xaui low power*/
2492 bnx2x_bcm8073_set_xaui_low_power_mode(params); 2612 bnx2x_bcm8073_set_xaui_low_power_mode(params);
2493 } 2613 }
2494 2614
2495 /* enable LASI */ 2615 bnx2x_cl45_read(bp, params->port,
2496 bnx2x_cl45_write(bp, params->port, 2616 ext_phy_type,
2497 ext_phy_type, 2617 ext_phy_addr,
2498 ext_phy_addr, 2618 MDIO_PMA_DEVAD,
2499 MDIO_PMA_DEVAD, 2619 0xca13,
2500 MDIO_PMA_REG_RX_ALARM_CTRL, 2620 &tmp1);
2501 rx_alarm_ctrl_val);
2502
2503 bnx2x_cl45_write(bp, params->port,
2504 ext_phy_type,
2505 ext_phy_addr,
2506 MDIO_PMA_DEVAD,
2507 MDIO_PMA_REG_LASI_CTRL,
2508 lasi_ctrl_val);
2509 2621
2510 bnx2x_cl45_read(bp, params->port, 2622 bnx2x_cl45_read(bp, params->port,
2511 ext_phy_type, 2623 ext_phy_type,
@@ -2519,12 +2631,21 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2519 /* If this is forced speed, set to KR or KX 2631 /* If this is forced speed, set to KR or KX
2520 * (all other are not supported) 2632 * (all other are not supported)
2521 */ 2633 */
2522 if (!(params->req_line_speed == SPEED_AUTO_NEG)) { 2634 if (params->loopback_mode == LOOPBACK_EXT) {
2523 if (params->req_line_speed == SPEED_10000) { 2635 bnx2x_bcm807x_force_10G(params);
2524 bnx2x_bcm807x_force_10G(params); 2636 DP(NETIF_MSG_LINK,
2525 DP(NETIF_MSG_LINK, 2637 "Forced speed 10G on 807X\n");
2526 "Forced speed 10G on 807X\n"); 2638 break;
2527 break; 2639 } else {
2640 bnx2x_cl45_write(bp, params->port,
2641 ext_phy_type, ext_phy_addr,
2642 MDIO_PMA_DEVAD,
2643 MDIO_PMA_REG_BCM_CTRL,
2644 0x0002);
2645 }
2646 if (params->req_line_speed != SPEED_AUTO_NEG) {
2647 if (params->req_line_speed == SPEED_10000) {
2648 val = (1<<7);
2528 } else if (params->req_line_speed == 2649 } else if (params->req_line_speed ==
2529 SPEED_2500) { 2650 SPEED_2500) {
2530 val = (1<<5); 2651 val = (1<<5);
@@ -2539,11 +2660,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2539 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2660 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2540 val |= (1<<7); 2661 val |= (1<<7);
2541 2662
2663 /* Note that 2.5G works only when
2664 used with 1G advertisment */
2542 if (params->speed_cap_mask & 2665 if (params->speed_cap_mask &
2543 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) 2666 (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
2667 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
2544 val |= (1<<5); 2668 val |= (1<<5);
2545 DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val); 2669 DP(NETIF_MSG_LINK,
2546 /*val = ((1<<5)|(1<<7));*/ 2670 "807x autoneg val = 0x%x\n", val);
2547 } 2671 }
2548 2672
2549 bnx2x_cl45_write(bp, params->port, 2673 bnx2x_cl45_write(bp, params->port,
@@ -2554,20 +2678,19 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2554 2678
2555 if (ext_phy_type == 2679 if (ext_phy_type ==
2556 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2680 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2557 /* Disable 2.5Ghz */ 2681
2558 bnx2x_cl45_read(bp, params->port, 2682 bnx2x_cl45_read(bp, params->port,
2559 ext_phy_type, 2683 ext_phy_type,
2560 ext_phy_addr, 2684 ext_phy_addr,
2561 MDIO_AN_DEVAD, 2685 MDIO_AN_DEVAD,
2562 0x8329, &tmp1); 2686 0x8329, &tmp1);
2563/* SUPPORT_SPEED_CAPABILITY 2687
2564 (Due to the nature of the link order, its not 2688 if (((params->speed_cap_mask &
2565 possible to enable 2.5G within the autoneg 2689 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
2566 capabilities) 2690 (params->req_line_speed ==
2567 if (params->speed_cap_mask & 2691 SPEED_AUTO_NEG)) ||
2568 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 2692 (params->req_line_speed ==
2569*/ 2693 SPEED_2500)) {
2570 if (params->req_line_speed == SPEED_2500) {
2571 u16 phy_ver; 2694 u16 phy_ver;
2572 /* Allow 2.5G for A1 and above */ 2695 /* Allow 2.5G for A1 and above */
2573 bnx2x_cl45_read(bp, params->port, 2696 bnx2x_cl45_read(bp, params->port,
@@ -2575,49 +2698,53 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2575 ext_phy_addr, 2698 ext_phy_addr,
2576 MDIO_PMA_DEVAD, 2699 MDIO_PMA_DEVAD,
2577 0xc801, &phy_ver); 2700 0xc801, &phy_ver);
2578 2701 DP(NETIF_MSG_LINK, "Add 2.5G\n");
2579 if (phy_ver > 0) 2702 if (phy_ver > 0)
2580 tmp1 |= 1; 2703 tmp1 |= 1;
2581 else 2704 else
2582 tmp1 &= 0xfffe; 2705 tmp1 &= 0xfffe;
2583 } 2706 } else {
2584 else 2707 DP(NETIF_MSG_LINK, "Disable 2.5G\n");
2585 tmp1 &= 0xfffe; 2708 tmp1 &= 0xfffe;
2709 }
2586 2710
2587 bnx2x_cl45_write(bp, params->port, 2711 bnx2x_cl45_write(bp, params->port,
2588 ext_phy_type, 2712 ext_phy_type,
2589 ext_phy_addr, 2713 ext_phy_addr,
2590 MDIO_AN_DEVAD, 2714 MDIO_AN_DEVAD,
2591 0x8329, tmp1); 2715 0x8329, tmp1);
2592 } 2716 }
2593 /* Add support for CL37 (passive mode) I */ 2717
2594 bnx2x_cl45_write(bp, params->port, 2718 /* Add support for CL37 (passive mode) II */
2719
2720 bnx2x_cl45_read(bp, params->port,
2595 ext_phy_type, 2721 ext_phy_type,
2596 ext_phy_addr, 2722 ext_phy_addr,
2597 MDIO_AN_DEVAD, 2723 MDIO_AN_DEVAD,
2598 MDIO_AN_REG_CL37_CL73, 0x040c); 2724 MDIO_AN_REG_CL37_FC_LD,
2599 /* Add support for CL37 (passive mode) II */ 2725 &tmp1);
2726
2600 bnx2x_cl45_write(bp, params->port, 2727 bnx2x_cl45_write(bp, params->port,
2601 ext_phy_type, 2728 ext_phy_type,
2602 ext_phy_addr, 2729 ext_phy_addr,
2603 MDIO_AN_DEVAD, 2730 MDIO_AN_DEVAD,
2604 MDIO_AN_REG_CL37_FD, 0x20); 2731 MDIO_AN_REG_CL37_FC_LD, (tmp1 |
2732 ((params->req_duplex == DUPLEX_FULL) ?
2733 0x20 : 0x40)));
2734
2605 /* Add support for CL37 (passive mode) III */ 2735 /* Add support for CL37 (passive mode) III */
2606 bnx2x_cl45_write(bp, params->port, 2736 bnx2x_cl45_write(bp, params->port,
2607 ext_phy_type, 2737 ext_phy_type,
2608 ext_phy_addr, 2738 ext_phy_addr,
2609 MDIO_AN_DEVAD, 2739 MDIO_AN_DEVAD,
2610 MDIO_AN_REG_CL37_AN, 0x1000); 2740 MDIO_AN_REG_CL37_AN, 0x1000);
2611 /* Restart autoneg */
2612 msleep(500);
2613 2741
2614 if (ext_phy_type == 2742 if (ext_phy_type ==
2615 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 2743 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2616 2744 /* The SNR will improve about 2db by changing
2617 /* The SNR will improve about 2db by changing the
2618 BW and FEE main tap. Rest commands are executed 2745 BW and FEE main tap. Rest commands are executed
2619 after link is up*/ 2746 after link is up*/
2620 /* Change FFE main cursor to 5 in EDC register */ 2747 /*Change FFE main cursor to 5 in EDC register*/
2621 if (bnx2x_8073_is_snr_needed(params)) 2748 if (bnx2x_8073_is_snr_needed(params))
2622 bnx2x_cl45_write(bp, params->port, 2749 bnx2x_cl45_write(bp, params->port,
2623 ext_phy_type, 2750 ext_phy_type,
@@ -2626,25 +2753,28 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2626 MDIO_PMA_REG_EDC_FFE_MAIN, 2753 MDIO_PMA_REG_EDC_FFE_MAIN,
2627 0xFB0C); 2754 0xFB0C);
2628 2755
2629 /* Enable FEC (Forware Error Correction) 2756 /* Enable FEC (Forware Error Correction)
2630 Request in the AN */ 2757 Request in the AN */
2631 bnx2x_cl45_read(bp, params->port, 2758 bnx2x_cl45_read(bp, params->port,
2632 ext_phy_type, 2759 ext_phy_type,
2633 ext_phy_addr, 2760 ext_phy_addr,
2634 MDIO_AN_DEVAD, 2761 MDIO_AN_DEVAD,
2635 MDIO_AN_REG_ADV2, &tmp1); 2762 MDIO_AN_REG_ADV2, &tmp1);
2636 2763
2637 tmp1 |= (1<<15); 2764 tmp1 |= (1<<15);
2765
2766 bnx2x_cl45_write(bp, params->port,
2767 ext_phy_type,
2768 ext_phy_addr,
2769 MDIO_AN_DEVAD,
2770 MDIO_AN_REG_ADV2, tmp1);
2638 2771
2639 bnx2x_cl45_write(bp, params->port,
2640 ext_phy_type,
2641 ext_phy_addr,
2642 MDIO_AN_DEVAD,
2643 MDIO_AN_REG_ADV2, tmp1);
2644 } 2772 }
2645 2773
2646 bnx2x_ext_phy_set_pause(params, vars); 2774 bnx2x_ext_phy_set_pause(params, vars);
2647 2775
2776 /* Restart autoneg */
2777 msleep(500);
2648 bnx2x_cl45_write(bp, params->port, 2778 bnx2x_cl45_write(bp, params->port,
2649 ext_phy_type, 2779 ext_phy_type,
2650 ext_phy_addr, 2780 ext_phy_addr,
@@ -2701,10 +2831,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2701 } 2831 }
2702 2832
2703 } else { /* SerDes */ 2833 } else { /* SerDes */
2704/* ext_phy_addr = ((bp->ext_phy_config & 2834
2705 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2706 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2707*/
2708 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config); 2835 ext_phy_type = SERDES_EXT_PHY_TYPE(params->ext_phy_config);
2709 switch (ext_phy_type) { 2836 switch (ext_phy_type) {
2710 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: 2837 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
@@ -2726,7 +2853,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
2726 2853
2727 2854
2728static u8 bnx2x_ext_phy_is_link_up(struct link_params *params, 2855static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2729 struct link_vars *vars) 2856 struct link_vars *vars)
2730{ 2857{
2731 struct bnx2x *bp = params->bp; 2858 struct bnx2x *bp = params->bp;
2732 u32 ext_phy_type; 2859 u32 ext_phy_type;
@@ -2767,6 +2894,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2767 MDIO_PMA_REG_RX_SD, &rx_sd); 2894 MDIO_PMA_REG_RX_SD, &rx_sd);
2768 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); 2895 DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
2769 ext_phy_link_up = (rx_sd & 0x1); 2896 ext_phy_link_up = (rx_sd & 0x1);
2897 if (ext_phy_link_up)
2898 vars->line_speed = SPEED_10000;
2770 break; 2899 break;
2771 2900
2772 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: 2901 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
@@ -2810,6 +2939,13 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2810 */ 2939 */
2811 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) || 2940 ext_phy_link_up = ((rx_sd & pcs_status & 0x1) ||
2812 (val2 & (1<<1))); 2941 (val2 & (1<<1)));
2942 if (ext_phy_link_up) {
2943 if (val2 & (1<<1))
2944 vars->line_speed = SPEED_1000;
2945 else
2946 vars->line_speed = SPEED_10000;
2947 }
2948
2813 /* clear LASI indication*/ 2949 /* clear LASI indication*/
2814 bnx2x_cl45_read(bp, params->port, ext_phy_type, 2950 bnx2x_cl45_read(bp, params->port, ext_phy_type,
2815 ext_phy_addr, 2951 ext_phy_addr,
@@ -2820,6 +2956,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2820 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 2956 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
2821 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 2957 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
2822 { 2958 {
2959 u16 link_status = 0;
2960 u16 an1000_status = 0;
2823 if (ext_phy_type == 2961 if (ext_phy_type ==
2824 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) { 2962 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) {
2825 bnx2x_cl45_read(bp, params->port, 2963 bnx2x_cl45_read(bp, params->port,
@@ -2846,14 +2984,9 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2846 MDIO_PMA_DEVAD, 2984 MDIO_PMA_DEVAD,
2847 MDIO_PMA_REG_LASI_STATUS, &val1); 2985 MDIO_PMA_REG_LASI_STATUS, &val1);
2848 2986
2849 bnx2x_cl45_read(bp, params->port,
2850 ext_phy_type,
2851 ext_phy_addr,
2852 MDIO_PMA_DEVAD,
2853 MDIO_PMA_REG_LASI_STATUS, &val2);
2854 DP(NETIF_MSG_LINK, 2987 DP(NETIF_MSG_LINK,
2855 "8703 LASI status 0x%x->0x%x\n", 2988 "8703 LASI status 0x%x\n",
2856 val1, val2); 2989 val1);
2857 } 2990 }
2858 2991
2859 /* clear the interrupt LASI status register */ 2992 /* clear the interrupt LASI status register */
@@ -2869,20 +3002,23 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2869 MDIO_PCS_REG_STATUS, &val1); 3002 MDIO_PCS_REG_STATUS, &val1);
2870 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", 3003 DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n",
2871 val2, val1); 3004 val2, val1);
2872 /* Check the LASI */ 3005 /* Clear MSG-OUT */
2873 bnx2x_cl45_read(bp, params->port, 3006 bnx2x_cl45_read(bp, params->port,
2874 ext_phy_type, 3007 ext_phy_type,
2875 ext_phy_addr, 3008 ext_phy_addr,
2876 MDIO_PMA_DEVAD, 3009 MDIO_PMA_DEVAD,
2877 MDIO_PMA_REG_RX_ALARM, &val2); 3010 0xca13,
3011 &val1);
3012
3013 /* Check the LASI */
2878 bnx2x_cl45_read(bp, params->port, 3014 bnx2x_cl45_read(bp, params->port,
2879 ext_phy_type, 3015 ext_phy_type,
2880 ext_phy_addr, 3016 ext_phy_addr,
2881 MDIO_PMA_DEVAD, 3017 MDIO_PMA_DEVAD,
2882 MDIO_PMA_REG_RX_ALARM, 3018 MDIO_PMA_REG_RX_ALARM, &val2);
2883 &val1); 3019
2884 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", 3020 DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
2885 val2, val1); 3021
2886 /* Check the link status */ 3022 /* Check the link status */
2887 bnx2x_cl45_read(bp, params->port, 3023 bnx2x_cl45_read(bp, params->port,
2888 ext_phy_type, 3024 ext_phy_type,
@@ -2905,29 +3041,29 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2905 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1); 3041 DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
2906 if (ext_phy_type == 3042 if (ext_phy_type ==
2907 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 3043 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
2908 u16 an1000_status = 0; 3044
2909 if (ext_phy_link_up && 3045 if (ext_phy_link_up &&
2910 ( 3046 ((params->req_line_speed !=
2911 (params->req_line_speed != SPEED_10000) 3047 SPEED_10000))) {
2912 )) {
2913 if (bnx2x_bcm8073_xaui_wa(params) 3048 if (bnx2x_bcm8073_xaui_wa(params)
2914 != 0) { 3049 != 0) {
2915 ext_phy_link_up = 0; 3050 ext_phy_link_up = 0;
2916 break; 3051 break;
2917 } 3052 }
2918 bnx2x_cl45_read(bp, params->port, 3053 }
3054 bnx2x_cl45_read(bp, params->port,
2919 ext_phy_type, 3055 ext_phy_type,
2920 ext_phy_addr, 3056 ext_phy_addr,
2921 MDIO_XS_DEVAD, 3057 MDIO_AN_DEVAD,
2922 0x8304, 3058 0x8304,
2923 &an1000_status); 3059 &an1000_status);
2924 bnx2x_cl45_read(bp, params->port, 3060 bnx2x_cl45_read(bp, params->port,
2925 ext_phy_type, 3061 ext_phy_type,
2926 ext_phy_addr, 3062 ext_phy_addr,
2927 MDIO_XS_DEVAD, 3063 MDIO_AN_DEVAD,
2928 0x8304, 3064 0x8304,
2929 &an1000_status); 3065 &an1000_status);
2930 } 3066
2931 /* Check the link status on 1.1.2 */ 3067 /* Check the link status on 1.1.2 */
2932 bnx2x_cl45_read(bp, params->port, 3068 bnx2x_cl45_read(bp, params->port,
2933 ext_phy_type, 3069 ext_phy_type,
@@ -2943,8 +3079,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2943 "an_link_status=0x%x\n", 3079 "an_link_status=0x%x\n",
2944 val2, val1, an1000_status); 3080 val2, val1, an1000_status);
2945 3081
2946 ext_phy_link_up = (((val1 & 4) == 4) || 3082 ext_phy_link_up = (((val1 & 4) == 4) ||
2947 (an1000_status & (1<<1))); 3083 (an1000_status & (1<<1)));
2948 if (ext_phy_link_up && 3084 if (ext_phy_link_up &&
2949 bnx2x_8073_is_snr_needed(params)) { 3085 bnx2x_8073_is_snr_needed(params)) {
2950 /* The SNR will improve about 2dbby 3086 /* The SNR will improve about 2dbby
@@ -2968,8 +3104,74 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
2968 MDIO_PMA_REG_CDR_BANDWIDTH, 3104 MDIO_PMA_REG_CDR_BANDWIDTH,
2969 0x0333); 3105 0x0333);
2970 3106
3107
3108 }
3109 bnx2x_cl45_read(bp, params->port,
3110 ext_phy_type,
3111 ext_phy_addr,
3112 MDIO_PMA_DEVAD,
3113 0xc820,
3114 &link_status);
3115
3116 /* Bits 0..2 --> speed detected,
3117 bits 13..15--> link is down */
3118 if ((link_status & (1<<2)) &&
3119 (!(link_status & (1<<15)))) {
3120 ext_phy_link_up = 1;
3121 vars->line_speed = SPEED_10000;
3122 DP(NETIF_MSG_LINK,
3123 "port %x: External link"
3124 " up in 10G\n", params->port);
3125 } else if ((link_status & (1<<1)) &&
3126 (!(link_status & (1<<14)))) {
3127 ext_phy_link_up = 1;
3128 vars->line_speed = SPEED_2500;
3129 DP(NETIF_MSG_LINK,
3130 "port %x: External link"
3131 " up in 2.5G\n", params->port);
3132 } else if ((link_status & (1<<0)) &&
3133 (!(link_status & (1<<13)))) {
3134 ext_phy_link_up = 1;
3135 vars->line_speed = SPEED_1000;
3136 DP(NETIF_MSG_LINK,
3137 "port %x: External link"
3138 " up in 1G\n", params->port);
3139 } else {
3140 ext_phy_link_up = 0;
3141 DP(NETIF_MSG_LINK,
3142 "port %x: External link"
3143 " is down\n", params->port);
3144 }
3145 } else {
3146 /* See if 1G link is up for the 8072 */
3147 bnx2x_cl45_read(bp, params->port,
3148 ext_phy_type,
3149 ext_phy_addr,
3150 MDIO_AN_DEVAD,
3151 0x8304,
3152 &an1000_status);
3153 bnx2x_cl45_read(bp, params->port,
3154 ext_phy_type,
3155 ext_phy_addr,
3156 MDIO_AN_DEVAD,
3157 0x8304,
3158 &an1000_status);
3159 if (an1000_status & (1<<1)) {
3160 ext_phy_link_up = 1;
3161 vars->line_speed = SPEED_1000;
3162 DP(NETIF_MSG_LINK,
3163 "port %x: External link"
3164 " up in 1G\n", params->port);
3165 } else if (ext_phy_link_up) {
3166 ext_phy_link_up = 1;
3167 vars->line_speed = SPEED_10000;
3168 DP(NETIF_MSG_LINK,
3169 "port %x: External link"
3170 " up in 10G\n", params->port);
2971 } 3171 }
2972 } 3172 }
3173
3174
2973 break; 3175 break;
2974 } 3176 }
2975 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 3177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
@@ -3006,6 +3208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
3006 MDIO_AN_DEVAD, 3208 MDIO_AN_DEVAD,
3007 MDIO_AN_REG_MASTER_STATUS, 3209 MDIO_AN_REG_MASTER_STATUS,
3008 &val2); 3210 &val2);
3211 vars->line_speed = SPEED_10000;
3009 DP(NETIF_MSG_LINK, 3212 DP(NETIF_MSG_LINK,
3010 "SFX7101 AN status 0x%x->Master=%x\n", 3213 "SFX7101 AN status 0x%x->Master=%x\n",
3011 val2, 3214 val2,
@@ -3100,7 +3303,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
3100 * link management 3303 * link management
3101 */ 3304 */
3102static void bnx2x_link_int_ack(struct link_params *params, 3305static void bnx2x_link_int_ack(struct link_params *params,
3103 struct link_vars *vars, u16 is_10g) 3306 struct link_vars *vars, u8 is_10g)
3104{ 3307{
3105 struct bnx2x *bp = params->bp; 3308 struct bnx2x *bp = params->bp;
3106 u8 port = params->port; 3309 u8 port = params->port;
@@ -3181,7 +3384,8 @@ static u8 bnx2x_format_ver(u32 num, u8 *str, u16 len)
3181} 3384}
3182 3385
3183 3386
3184static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr) 3387static void bnx2x_turn_on_ef(struct bnx2x *bp, u8 port, u8 ext_phy_addr,
3388 u32 ext_phy_type)
3185{ 3389{
3186 u32 cnt = 0; 3390 u32 cnt = 0;
3187 u16 ctrl = 0; 3391 u16 ctrl = 0;
@@ -3192,12 +3396,14 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3192 3396
3193 /* take ext phy out of reset */ 3397 /* take ext phy out of reset */
3194 bnx2x_set_gpio(bp, 3398 bnx2x_set_gpio(bp,
3195 MISC_REGISTERS_GPIO_2, 3399 MISC_REGISTERS_GPIO_2,
3196 MISC_REGISTERS_GPIO_HIGH); 3400 MISC_REGISTERS_GPIO_HIGH,
3401 port);
3197 3402
3198 bnx2x_set_gpio(bp, 3403 bnx2x_set_gpio(bp,
3199 MISC_REGISTERS_GPIO_1, 3404 MISC_REGISTERS_GPIO_1,
3200 MISC_REGISTERS_GPIO_HIGH); 3405 MISC_REGISTERS_GPIO_HIGH,
3406 port);
3201 3407
3202 /* wait for 5ms */ 3408 /* wait for 5ms */
3203 msleep(5); 3409 msleep(5);
@@ -3205,7 +3411,7 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3205 for (cnt = 0; cnt < 1000; cnt++) { 3411 for (cnt = 0; cnt < 1000; cnt++) {
3206 msleep(1); 3412 msleep(1);
3207 bnx2x_cl45_read(bp, port, 3413 bnx2x_cl45_read(bp, port,
3208 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 3414 ext_phy_type,
3209 ext_phy_addr, 3415 ext_phy_addr,
3210 MDIO_PMA_DEVAD, 3416 MDIO_PMA_DEVAD,
3211 MDIO_PMA_REG_CTRL, 3417 MDIO_PMA_REG_CTRL,
@@ -3217,13 +3423,17 @@ static void bnx2x_turn_on_sf(struct bnx2x *bp, u8 port, u8 ext_phy_addr)
3217 } 3423 }
3218} 3424}
3219 3425
3220static void bnx2x_turn_off_sf(struct bnx2x *bp) 3426static void bnx2x_turn_off_sf(struct bnx2x *bp, u8 port)
3221{ 3427{
3222 /* put sf to reset */ 3428 /* put sf to reset */
3223 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_LOW);
3224 bnx2x_set_gpio(bp, 3429 bnx2x_set_gpio(bp,
3225 MISC_REGISTERS_GPIO_2, 3430 MISC_REGISTERS_GPIO_1,
3226 MISC_REGISTERS_GPIO_LOW); 3431 MISC_REGISTERS_GPIO_LOW,
3432 port);
3433 bnx2x_set_gpio(bp,
3434 MISC_REGISTERS_GPIO_2,
3435 MISC_REGISTERS_GPIO_LOW,
3436 port);
3227} 3437}
3228 3438
3229u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, 3439u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
@@ -3253,7 +3463,8 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3253 3463
3254 /* Take ext phy out of reset */ 3464 /* Take ext phy out of reset */
3255 if (!driver_loaded) 3465 if (!driver_loaded)
3256 bnx2x_turn_on_sf(bp, params->port, ext_phy_addr); 3466 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3467 ext_phy_type);
3257 3468
3258 /* wait for 1ms */ 3469 /* wait for 1ms */
3259 msleep(1); 3470 msleep(1);
@@ -3276,11 +3487,16 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
3276 version[4] = '\0'; 3487 version[4] = '\0';
3277 3488
3278 if (!driver_loaded) 3489 if (!driver_loaded)
3279 bnx2x_turn_off_sf(bp); 3490 bnx2x_turn_off_sf(bp, params->port);
3280 break; 3491 break;
3281 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: 3492 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3282 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073: 3493 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
3283 { 3494 {
3495 /* Take ext phy out of reset */
3496 if (!driver_loaded)
3497 bnx2x_turn_on_ef(bp, params->port, ext_phy_addr,
3498 ext_phy_type);
3499
3284 bnx2x_cl45_read(bp, params->port, ext_phy_type, 3500 bnx2x_cl45_read(bp, params->port, ext_phy_type,
3285 ext_phy_addr, 3501 ext_phy_addr,
3286 MDIO_PMA_DEVAD, 3502 MDIO_PMA_DEVAD,
@@ -3333,7 +3549,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
3333 struct bnx2x *bp = params->bp; 3549 struct bnx2x *bp = params->bp;
3334 3550
3335 if (is_10g) { 3551 if (is_10g) {
3336 u32 md_devad; 3552 u32 md_devad;
3337 3553
3338 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); 3554 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3339 3555
@@ -3553,6 +3769,8 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3553 u16 hw_led_mode, u32 chip_id) 3769 u16 hw_led_mode, u32 chip_id)
3554{ 3770{
3555 u8 rc = 0; 3771 u8 rc = 0;
3772 u32 tmp;
3773 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
3556 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 3774 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
3557 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 3775 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
3558 speed, hw_led_mode); 3776 speed, hw_led_mode);
@@ -3561,6 +3779,9 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3561 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); 3779 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
3562 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 3780 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
3563 SHARED_HW_CFG_LED_MAC1); 3781 SHARED_HW_CFG_LED_MAC1);
3782
3783 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3784 EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
3564 break; 3785 break;
3565 3786
3566 case LED_MODE_OPER: 3787 case LED_MODE_OPER:
@@ -3572,6 +3793,10 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
3572 LED_BLINK_RATE_VAL); 3793 LED_BLINK_RATE_VAL);
3573 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + 3794 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
3574 port*4, 1); 3795 port*4, 1);
3796 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
3797 EMAC_WR(bp, EMAC_REG_EMAC_LED,
3798 (tmp & (~EMAC_LED_OVERRIDE)));
3799
3575 if (!CHIP_IS_E1H(bp) && 3800 if (!CHIP_IS_E1H(bp) &&
3576 ((speed == SPEED_2500) || 3801 ((speed == SPEED_2500) ||
3577 (speed == SPEED_1000) || 3802 (speed == SPEED_1000) ||
@@ -3622,7 +3847,8 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3622 struct bnx2x *bp = params->bp; 3847 struct bnx2x *bp = params->bp;
3623 u8 port = params->port; 3848 u8 port = params->port;
3624 u8 rc = 0; 3849 u8 rc = 0;
3625 3850 u8 non_ext_phy;
3851 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
3626 /* Activate the external PHY */ 3852 /* Activate the external PHY */
3627 bnx2x_ext_phy_reset(params, vars); 3853 bnx2x_ext_phy_reset(params, vars);
3628 3854
@@ -3644,10 +3870,6 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3644 bnx2x_set_swap_lanes(params); 3870 bnx2x_set_swap_lanes(params);
3645 } 3871 }
3646 3872
3647 /* Set Parallel Detect */
3648 if (params->req_line_speed == SPEED_AUTO_NEG)
3649 bnx2x_set_parallel_detection(params, vars->phy_flags);
3650
3651 if (vars->phy_flags & PHY_XGXS_FLAG) { 3873 if (vars->phy_flags & PHY_XGXS_FLAG) {
3652 if (params->req_line_speed && 3874 if (params->req_line_speed &&
3653 ((params->req_line_speed == SPEED_100) || 3875 ((params->req_line_speed == SPEED_100) ||
@@ -3657,68 +3879,33 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3657 vars->phy_flags &= ~PHY_SGMII_FLAG; 3879 vars->phy_flags &= ~PHY_SGMII_FLAG;
3658 } 3880 }
3659 } 3881 }
3882 /* In case of external phy existance, the line speed would be the
3883 line speed linked up by the external phy. In case it is direct only,
3884 then the line_speed during initialization will be equal to the
3885 req_line_speed*/
3886 vars->line_speed = params->req_line_speed;
3660 3887
3661 if (!(vars->phy_flags & PHY_SGMII_FLAG)) { 3888 bnx2x_calc_ieee_aneg_adv(params, &vars->ieee_fc);
3662 u16 bank, rx_eq;
3663
3664 rx_eq = ((params->serdes_config &
3665 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3666 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3667 3889
3668 DP(NETIF_MSG_LINK, "setting rx eq to 0x%x\n", rx_eq); 3890 /* init ext phy and enable link state int */
3669 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL; 3891 non_ext_phy = ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ||
3670 bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0)) { 3892 (params->loopback_mode == LOOPBACK_XGXS_10) ||
3671 CL45_WR_OVER_CL22(bp, port, 3893 (params->loopback_mode == LOOPBACK_EXT_PHY));
3672 params->phy_addr, 3894
3673 bank , 3895 if (non_ext_phy ||
3674 MDIO_RX0_RX_EQ_BOOST, 3896 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705)) {
3675 ((rx_eq & 3897 if (params->req_line_speed == SPEED_AUTO_NEG)
3676 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) | 3898 bnx2x_set_parallel_detection(params, vars->phy_flags);
3677 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL)); 3899 bnx2x_init_internal_phy(params, vars);
3678 }
3679
3680 /* forced speed requested? */
3681 if (params->req_line_speed != SPEED_AUTO_NEG) {
3682 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3683
3684 /* disable autoneg */
3685 bnx2x_set_autoneg(params, vars);
3686
3687 /* program speed and duplex */
3688 bnx2x_program_serdes(params);
3689 vars->ieee_fc =
3690 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3691
3692 } else { /* AN_mode */
3693 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3694
3695 /* AN enabled */
3696 bnx2x_set_brcm_cl37_advertisment(params);
3697
3698 /* program duplex & pause advertisement (for aneg) */
3699 bnx2x_set_ieee_aneg_advertisment(params,
3700 &vars->ieee_fc);
3701
3702 /* enable autoneg */
3703 bnx2x_set_autoneg(params, vars);
3704
3705 /* enable and restart AN */
3706 bnx2x_restart_autoneg(params);
3707 }
3708
3709 } else { /* SGMII mode */
3710 DP(NETIF_MSG_LINK, "SGMII\n");
3711
3712 bnx2x_initialize_sgmii_process(params);
3713 } 3900 }
3714 3901
3715 /* init ext phy and enable link state int */ 3902 if (!non_ext_phy)
3716 rc |= bnx2x_ext_phy_init(params, vars); 3903 rc |= bnx2x_ext_phy_init(params, vars);
3717 3904
3718 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 3905 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3719 (NIG_STATUS_XGXS0_LINK10G | 3906 (NIG_STATUS_XGXS0_LINK10G |
3720 NIG_STATUS_XGXS0_LINK_STATUS | 3907 NIG_STATUS_XGXS0_LINK_STATUS |
3721 NIG_STATUS_SERDES0_LINK_STATUS)); 3908 NIG_STATUS_SERDES0_LINK_STATUS));
3722 3909
3723 return rc; 3910 return rc;
3724 3911
@@ -3730,15 +3917,23 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3730 struct bnx2x *bp = params->bp; 3917 struct bnx2x *bp = params->bp;
3731 3918
3732 u32 val; 3919 u32 val;
3733 DP(NETIF_MSG_LINK, "Phy Initialization started\n"); 3920 DP(NETIF_MSG_LINK, "Phy Initialization started \n");
3734 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n", 3921 DP(NETIF_MSG_LINK, "req_speed = %d, req_flowctrl=%d\n",
3735 params->req_line_speed, params->req_flow_ctrl); 3922 params->req_line_speed, params->req_flow_ctrl);
3736 vars->link_status = 0; 3923 vars->link_status = 0;
3924 vars->phy_link_up = 0;
3925 vars->link_up = 0;
3926 vars->line_speed = 0;
3927 vars->duplex = DUPLEX_FULL;
3928 vars->flow_ctrl = FLOW_CTRL_NONE;
3929 vars->mac_type = MAC_TYPE_NONE;
3930
3737 if (params->switch_cfg == SWITCH_CFG_1G) 3931 if (params->switch_cfg == SWITCH_CFG_1G)
3738 vars->phy_flags = PHY_SERDES_FLAG; 3932 vars->phy_flags = PHY_SERDES_FLAG;
3739 else 3933 else
3740 vars->phy_flags = PHY_XGXS_FLAG; 3934 vars->phy_flags = PHY_XGXS_FLAG;
3741 3935
3936
3742 /* disable attentions */ 3937 /* disable attentions */
3743 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, 3938 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
3744 (NIG_MASK_XGXS0_LINK_STATUS | 3939 (NIG_MASK_XGXS0_LINK_STATUS |
@@ -3894,6 +4089,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3894 } 4089 }
3895 4090
3896 bnx2x_link_initialize(params, vars); 4091 bnx2x_link_initialize(params, vars);
4092 msleep(30);
3897 bnx2x_link_int_enable(params); 4093 bnx2x_link_int_enable(params);
3898 } 4094 }
3899 return 0; 4095 return 0;
@@ -3943,39 +4139,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3943 /* HW reset */ 4139 /* HW reset */
3944 4140
3945 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 4141 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3946 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4142 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4143 port);
3947 4144
3948 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4145 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3949 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4146 MISC_REGISTERS_GPIO_OUTPUT_LOW,
4147 port);
3950 4148
3951 DP(NETIF_MSG_LINK, "reset external PHY\n"); 4149 DP(NETIF_MSG_LINK, "reset external PHY\n");
3952 } else { 4150 } else if (ext_phy_type ==
3953 4151 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3954 u8 ext_phy_addr = ((ext_phy_config & 4152 DP(NETIF_MSG_LINK, "Setting 8073 port %d into "
3955 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3956 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3957
3958 /* SW reset */
3959 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3960 MDIO_PMA_DEVAD,
3961 MDIO_PMA_REG_CTRL,
3962 1<<15);
3963
3964 /* Set Low Power Mode */
3965 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
3966 MDIO_PMA_DEVAD,
3967 MDIO_PMA_REG_CTRL,
3968 1<<11);
3969
3970
3971 if (ext_phy_type ==
3972 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
3973 DP(NETIF_MSG_LINK, "Setting 8073 port %d into"
3974 "low power mode\n", 4153 "low power mode\n",
3975 port); 4154 port);
3976 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 4155 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
3977 MISC_REGISTERS_GPIO_OUTPUT_LOW); 4156 MISC_REGISTERS_GPIO_OUTPUT_LOW,
3978 } 4157 port);
3979 } 4158 }
3980 } 4159 }
3981 /* reset the SerDes/XGXS */ 4160 /* reset the SerDes/XGXS */
@@ -3995,6 +4174,73 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars)
3995 return 0; 4174 return 0;
3996} 4175}
3997 4176
4177static u8 bnx2x_update_link_down(struct link_params *params,
4178 struct link_vars *vars)
4179{
4180 struct bnx2x *bp = params->bp;
4181 u8 port = params->port;
4182 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
4183 bnx2x_set_led(bp, port, LED_MODE_OFF,
4184 0, params->hw_led_mode,
4185 params->chip_id);
4186
4187 /* indicate no mac active */
4188 vars->mac_type = MAC_TYPE_NONE;
4189
4190 /* update shared memory */
4191 vars->link_status = 0;
4192 vars->line_speed = 0;
4193 bnx2x_update_mng(params, vars->link_status);
4194
4195 /* activate nig drain */
4196 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
4197
4198 /* reset BigMac */
4199 bnx2x_bmac_rx_disable(bp, params->port);
4200 REG_WR(bp, GRCBASE_MISC +
4201 MISC_REGISTERS_RESET_REG_2_CLEAR,
4202 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4203 return 0;
4204}
4205
4206static u8 bnx2x_update_link_up(struct link_params *params,
4207 struct link_vars *vars,
4208 u8 link_10g, u32 gp_status)
4209{
4210 struct bnx2x *bp = params->bp;
4211 u8 port = params->port;
4212 u8 rc = 0;
4213 vars->link_status |= LINK_STATUS_LINK_UP;
4214 if (link_10g) {
4215 bnx2x_bmac_enable(params, vars, 0);
4216 bnx2x_set_led(bp, port, LED_MODE_OPER,
4217 SPEED_10000, params->hw_led_mode,
4218 params->chip_id);
4219
4220 } else {
4221 bnx2x_emac_enable(params, vars, 0);
4222 rc = bnx2x_emac_program(params, vars->line_speed,
4223 vars->duplex);
4224
4225 /* AN complete? */
4226 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
4227 if (!(vars->phy_flags &
4228 PHY_SGMII_FLAG))
4229 bnx2x_set_sgmii_tx_driver(params);
4230 }
4231 }
4232
4233 /* PBF - link up */
4234 rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
4235 vars->line_speed);
4236
4237 /* disable drain */
4238 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
4239
4240 /* update shared memory */
4241 bnx2x_update_mng(params, vars->link_status);
4242 return rc;
4243}
3998/* This function should called upon link interrupt */ 4244/* This function should called upon link interrupt */
3999/* In case vars->link_up, driver needs to 4245/* In case vars->link_up, driver needs to
4000 1. Update the pbf 4246 1. Update the pbf
@@ -4012,10 +4258,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4012{ 4258{
4013 struct bnx2x *bp = params->bp; 4259 struct bnx2x *bp = params->bp;
4014 u8 port = params->port; 4260 u8 port = params->port;
4015 u16 i;
4016 u16 gp_status; 4261 u16 gp_status;
4017 u16 link_10g; 4262 u8 link_10g;
4018 u8 rc = 0; 4263 u8 ext_phy_link_up, rc = 0;
4264 u32 ext_phy_type;
4019 4265
4020 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n", 4266 DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
4021 port, 4267 port,
@@ -4031,15 +4277,16 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4031 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
4032 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4278 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
4033 4279
4280 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4034 4281
4035 /* avoid fast toggling */ 4282 /* Check external link change only for non-direct */
4036 for (i = 0; i < 10; i++) { 4283 ext_phy_link_up = bnx2x_ext_phy_is_link_up(params, vars);
4037 msleep(10); 4284
4038 CL45_RD_OVER_CL22(bp, port, params->phy_addr, 4285 /* Read gp_status */
4039 MDIO_REG_BANK_GP_STATUS, 4286 CL45_RD_OVER_CL22(bp, port, params->phy_addr,
4040 MDIO_GP_STATUS_TOP_AN_STATUS1, 4287 MDIO_REG_BANK_GP_STATUS,
4041 &gp_status); 4288 MDIO_GP_STATUS_TOP_AN_STATUS1,
4042 } 4289 &gp_status);
4043 4290
4044 rc = bnx2x_link_settings_status(params, vars, gp_status); 4291 rc = bnx2x_link_settings_status(params, vars, gp_status);
4045 if (rc != 0) 4292 if (rc != 0)
@@ -4055,73 +4302,177 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4055 4302
4056 bnx2x_link_int_ack(params, vars, link_10g); 4303 bnx2x_link_int_ack(params, vars, link_10g);
4057 4304
4305 /* In case external phy link is up, and internal link is down
4306 ( not initialized yet probably after link initialization, it needs
4307 to be initialized.
4308 Note that after link down-up as result of cable plug,
4309 the xgxs link would probably become up again without the need to
4310 initialize it*/
4311
4312 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
4313 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
4314 (ext_phy_link_up && !vars->phy_link_up))
4315 bnx2x_init_internal_phy(params, vars);
4316
4058 /* link is up only if both local phy and external phy are up */ 4317 /* link is up only if both local phy and external phy are up */
4059 vars->link_up = (vars->phy_link_up && 4318 vars->link_up = (ext_phy_link_up && vars->phy_link_up);
4060 bnx2x_ext_phy_is_link_up(params, vars));
4061 4319
4062 if (!vars->phy_link_up && 4320 if (vars->link_up)
4063 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18)) { 4321 rc = bnx2x_update_link_up(params, vars, link_10g, gp_status);
4064 bnx2x_ext_phy_is_link_up(params, vars); /* Clear interrupt */ 4322 else
4323 rc = bnx2x_update_link_down(params, vars);
4324
4325 return rc;
4326}
4327
4328static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4329{
4330 u8 ext_phy_addr[PORT_MAX];
4331 u16 val;
4332 s8 port;
4333
4334 /* PART1 - Reset both phys */
4335 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4336 /* Extract the ext phy address for the port */
4337 u32 ext_phy_config = REG_RD(bp, shmem_base +
4338 offsetof(struct shmem_region,
4339 dev_info.port_hw_config[port].external_phy_config));
4340
4341 /* disable attentions */
4342 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
4343 (NIG_MASK_XGXS0_LINK_STATUS |
4344 NIG_MASK_XGXS0_LINK10G |
4345 NIG_MASK_SERDES0_LINK_STATUS |
4346 NIG_MASK_MI_INT));
4347
4348 ext_phy_addr[port] =
4349 ((ext_phy_config &
4350 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
4351 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
4352
4353 /* Need to take the phy out of low power mode in order
4354 to write to access its registers */
4355 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4356 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
4357
4358 /* Reset the phy */
4359 bnx2x_cl45_write(bp, port,
4360 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4361 ext_phy_addr[port],
4362 MDIO_PMA_DEVAD,
4363 MDIO_PMA_REG_CTRL,
4364 1<<15);
4065 } 4365 }
4066 4366
4067 if (vars->link_up) { 4367 /* Add delay of 150ms after reset */
4068 vars->link_status |= LINK_STATUS_LINK_UP; 4368 msleep(150);
4069 if (link_10g) {
4070 bnx2x_bmac_enable(params, vars, 0);
4071 bnx2x_set_led(bp, port, LED_MODE_OPER,
4072 SPEED_10000, params->hw_led_mode,
4073 params->chip_id);
4074 4369
4075 } else { 4370 /* PART2 - Download firmware to both phys */
4076 bnx2x_emac_enable(params, vars, 0); 4371 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4077 rc = bnx2x_emac_program(params, vars->line_speed, 4372 u16 fw_ver1;
4078 vars->duplex);
4079 4373
4080 /* AN complete? */ 4374 bnx2x_bcm8073_external_rom_boot(bp, port,
4081 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 4375 ext_phy_addr[port]);
4082 if (!(vars->phy_flags & 4376
4083 PHY_SGMII_FLAG)) 4377 bnx2x_cl45_read(bp, port, PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4084 bnx2x_set_sgmii_tx_driver(params); 4378 ext_phy_addr[port],
4085 } 4379 MDIO_PMA_DEVAD,
4380 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
4381 if (fw_ver1 == 0) {
4382 DP(NETIF_MSG_LINK,
4383 "bnx2x_8073_common_init_phy port %x "
4384 "fw Download failed\n", port);
4385 return -EINVAL;
4086 } 4386 }
4087 4387
4088 /* PBF - link up */ 4388 /* Only set bit 10 = 1 (Tx power down) */
4089 rc |= bnx2x_pbf_update(params, vars->flow_ctrl, 4389 bnx2x_cl45_read(bp, port,
4090 vars->line_speed); 4390 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4391 ext_phy_addr[port],
4392 MDIO_PMA_DEVAD,
4393 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4091 4394
4092 /* disable drain */ 4395 /* Phase1 of TX_POWER_DOWN reset */
4093 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); 4396 bnx2x_cl45_write(bp, port,
4397 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4398 ext_phy_addr[port],
4399 MDIO_PMA_DEVAD,
4400 MDIO_PMA_REG_TX_POWER_DOWN,
4401 (val | 1<<10));
4402 }
4094 4403
4095 /* update shared memory */ 4404 /* Toggle Transmitter: Power down and then up with 600ms
4096 bnx2x_update_mng(params, vars->link_status); 4405 delay between */
4406 msleep(600);
4097 4407
4098 } else { /* link down */ 4408 /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
4099 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", params->port); 4409 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
4100 bnx2x_set_led(bp, port, LED_MODE_OFF, 4410 /* Phase2 of POWER_DOWN_RESET*/
4101 0, params->hw_led_mode, 4411 /* Release bit 10 (Release Tx power down) */
4102 params->chip_id); 4412 bnx2x_cl45_read(bp, port,
4413 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4414 ext_phy_addr[port],
4415 MDIO_PMA_DEVAD,
4416 MDIO_PMA_REG_TX_POWER_DOWN, &val);
4103 4417
4104 /* indicate no mac active */ 4418 bnx2x_cl45_write(bp, port,
4105 vars->mac_type = MAC_TYPE_NONE; 4419 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4420 ext_phy_addr[port],
4421 MDIO_PMA_DEVAD,
4422 MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
4423 msleep(15);
4106 4424
4107 /* update shared memory */ 4425 /* Read modify write the SPI-ROM version select register */
4108 vars->link_status = 0; 4426 bnx2x_cl45_read(bp, port,
4109 bnx2x_update_mng(params, vars->link_status); 4427 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4428 ext_phy_addr[port],
4429 MDIO_PMA_DEVAD,
4430 MDIO_PMA_REG_EDC_FFE_MAIN, &val);
4431 bnx2x_cl45_write(bp, port,
4432 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
4433 ext_phy_addr[port],
4434 MDIO_PMA_DEVAD,
4435 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
4110 4436
4111 /* activate nig drain */ 4437 /* set GPIO2 back to LOW */
4112 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4438 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
4439 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
4440 }
4441 return 0;
4113 4442
4114 /* reset BigMac */ 4443}
4115 bnx2x_bmac_rx_disable(bp, params->port);
4116 REG_WR(bp, GRCBASE_MISC +
4117 MISC_REGISTERS_RESET_REG_2_CLEAR,
4118 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
4119 4444
4445u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4446{
4447 u8 rc = 0;
4448 u32 ext_phy_type;
4449
4450 DP(NETIF_MSG_LINK, "bnx2x_common_init_phy\n");
4451
4452 /* Read the ext_phy_type for arbitrary port(0) */
4453 ext_phy_type = XGXS_EXT_PHY_TYPE(
4454 REG_RD(bp, shmem_base +
4455 offsetof(struct shmem_region,
4456 dev_info.port_hw_config[0].external_phy_config)));
4457
4458 switch (ext_phy_type) {
4459 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4460 {
4461 rc = bnx2x_8073_common_init_phy(bp, shmem_base);
4462 break;
4463 }
4464 default:
4465 DP(NETIF_MSG_LINK,
4466 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
4467 ext_phy_type);
4468 break;
4120 } 4469 }
4121 4470
4122 return rc; 4471 return rc;
4123} 4472}
4124 4473
4474
4475
4125static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr) 4476static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
4126{ 4477{
4127 u16 val, cnt; 4478 u16 val, cnt;
@@ -4154,7 +4505,7 @@ static void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, u8 port, u8 phy_addr)
4154} 4505}
4155#define RESERVED_SIZE 256 4506#define RESERVED_SIZE 256
4156/* max application is 160K bytes - data at end of RAM */ 4507/* max application is 160K bytes - data at end of RAM */
4157#define MAX_APP_SIZE 160*1024 - RESERVED_SIZE 4508#define MAX_APP_SIZE (160*1024 - RESERVED_SIZE)
4158 4509
4159/* Header is 14 bytes */ 4510/* Header is 14 bytes */
4160#define HEADER_SIZE 14 4511#define HEADER_SIZE 14
@@ -4192,12 +4543,12 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4192 size = MAX_APP_SIZE+HEADER_SIZE; 4543 size = MAX_APP_SIZE+HEADER_SIZE;
4193 } 4544 }
4194 DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]); 4545 DP(NETIF_MSG_LINK, "File version is %c%c\n", data[0x14e], data[0x14f]);
4195 DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]); 4546 DP(NETIF_MSG_LINK, " %c%c\n", data[0x150], data[0x151]);
4196 /* Put the DSP in download mode by setting FLASH_CFG[2] to 1 4547 /* Put the DSP in download mode by setting FLASH_CFG[2] to 1
4197 and issuing a reset.*/ 4548 and issuing a reset.*/
4198 4549
4199 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, 4550 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
4200 MISC_REGISTERS_GPIO_HIGH); 4551 MISC_REGISTERS_GPIO_HIGH, port);
4201 4552
4202 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 4553 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
4203 4554
@@ -4429,7 +4780,8 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4429 } 4780 }
4430 4781
4431 /* DSP Remove Download Mode */ 4782 /* DSP Remove Download Mode */
4432 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, MISC_REGISTERS_GPIO_LOW); 4783 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
4784 MISC_REGISTERS_GPIO_LOW, port);
4433 4785
4434 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr); 4786 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
4435 4787
@@ -4437,7 +4789,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4437 for (cnt = 0; cnt < 100; cnt++) 4789 for (cnt = 0; cnt < 100; cnt++)
4438 msleep(5); 4790 msleep(5);
4439 4791
4440 bnx2x_hw_reset(bp); 4792 bnx2x_hw_reset(bp, port);
4441 4793
4442 for (cnt = 0; cnt < 100; cnt++) 4794 for (cnt = 0; cnt < 100; cnt++)
4443 msleep(5); 4795 msleep(5);
@@ -4473,7 +4825,7 @@ static u8 bnx2x_sfx7101_flash_download(struct bnx2x *bp, u8 port,
4473 MDIO_PMA_REG_7101_VER2, 4825 MDIO_PMA_REG_7101_VER2,
4474 &image_revision2); 4826 &image_revision2);
4475 4827
4476 if (data[0x14e] != (image_revision2&0xFF) || 4828 if (data[0x14e] != (image_revision2&0xFF) ||
4477 data[0x14f] != ((image_revision2&0xFF00)>>8) || 4829 data[0x14f] != ((image_revision2&0xFF00)>>8) ||
4478 data[0x150] != (image_revision1&0xFF) || 4830 data[0x150] != (image_revision1&0xFF) ||
4479 data[0x151] != ((image_revision1&0xFF00)>>8)) { 4831 data[0x151] != ((image_revision1&0xFF00)>>8)) {
@@ -4508,11 +4860,11 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
4508 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: 4860 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4509 /* Take ext phy out of reset */ 4861 /* Take ext phy out of reset */
4510 if (!driver_loaded) 4862 if (!driver_loaded)
4511 bnx2x_turn_on_sf(bp, port, ext_phy_addr); 4863 bnx2x_turn_on_ef(bp, port, ext_phy_addr, ext_phy_type);
4512 rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr, 4864 rc = bnx2x_sfx7101_flash_download(bp, port, ext_phy_addr,
4513 data, size); 4865 data, size);
4514 if (!driver_loaded) 4866 if (!driver_loaded)
4515 bnx2x_turn_off_sf(bp); 4867 bnx2x_turn_off_sf(bp, port);
4516 break; 4868 break;
4517 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 4869 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
4518 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 4870 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 714d37ac95de..86d54a17b411 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -55,14 +55,17 @@ struct link_params {
55#define LOOPBACK_BMAC 2 55#define LOOPBACK_BMAC 2
56#define LOOPBACK_XGXS_10 3 56#define LOOPBACK_XGXS_10 3
57#define LOOPBACK_EXT_PHY 4 57#define LOOPBACK_EXT_PHY 4
58#define LOOPBACK_EXT 5
58 59
59 u16 req_duplex; 60 u16 req_duplex;
60 u16 req_flow_ctrl; 61 u16 req_flow_ctrl;
62 u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
63 req_flow_ctrl is set to AUTO */
61 u16 req_line_speed; /* Also determine AutoNeg */ 64 u16 req_line_speed; /* Also determine AutoNeg */
62 65
63 /* Device parameters */ 66 /* Device parameters */
64 u8 mac_addr[6]; 67 u8 mac_addr[6];
65 u16 mtu; 68
66 69
67 70
68 /* shmem parameters */ 71 /* shmem parameters */
@@ -140,7 +143,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, u8 port, u32 ext_phy_type,
140 u8 phy_addr, u8 devad, u16 reg, u16 val); 143 u8 phy_addr, u8 devad, u16 reg, u16 val);
141 144
142/* Reads the link_status from the shmem, 145/* Reads the link_status from the shmem,
143 and update the link vars accordinaly */ 146 and update the link vars accordingly */
144void bnx2x_link_status_update(struct link_params *input, 147void bnx2x_link_status_update(struct link_params *input,
145 struct link_vars *output); 148 struct link_vars *output);
146/* returns string representing the fw_version of the external phy */ 149/* returns string representing the fw_version of the external phy */
@@ -149,7 +152,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
149 152
150/* Set/Unset the led 153/* Set/Unset the led
151 Basically, the CLC takes care of the led for the link, but in case one needs 154 Basically, the CLC takes care of the led for the link, but in case one needs
152 to set/unset the led unnatually, set the "mode" to LED_MODE_OPER to 155 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
153 blink the led, and LED_MODE_OFF to set the led off.*/ 156 blink the led, and LED_MODE_OFF to set the led off.*/
154u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 157u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
155 u16 hw_led_mode, u32 chip_id); 158 u16 hw_led_mode, u32 chip_id);
@@ -164,5 +167,7 @@ u8 bnx2x_flash_download(struct bnx2x *bp, u8 port, u32 ext_phy_config,
164 otherwise link is down*/ 167 otherwise link is down*/
165u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars); 168u8 bnx2x_test_link(struct link_params *input, struct link_vars *vars);
166 169
170/* One-time initialization for external phy after power up */
171u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base);
167 172
168#endif /* BNX2X_LINK_H */ 173#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 272a4bd25953..3e7dc171cdf1 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -60,8 +60,8 @@
60#include "bnx2x.h" 60#include "bnx2x.h"
61#include "bnx2x_init.h" 61#include "bnx2x_init.h"
62 62
63#define DRV_MODULE_VERSION "1.45.6" 63#define DRV_MODULE_VERSION "1.45.17"
64#define DRV_MODULE_RELDATE "2008/06/23" 64#define DRV_MODULE_RELDATE "2008/08/13"
65#define BNX2X_BC_VER 0x040200 65#define BNX2X_BC_VER 0x040200
66 66
67/* Time in jiffies before concluding the transmitter is hung */ 67/* Time in jiffies before concluding the transmitter is hung */
@@ -76,23 +76,21 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
77MODULE_VERSION(DRV_MODULE_VERSION); 77MODULE_VERSION(DRV_MODULE_VERSION);
78 78
79static int disable_tpa;
79static int use_inta; 80static int use_inta;
80static int poll; 81static int poll;
81static int debug; 82static int debug;
82static int disable_tpa;
83static int nomcp;
84static int load_count[3]; /* 0-common, 1-port0, 2-port1 */ 83static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
85static int use_multi; 84static int use_multi;
86 85
86module_param(disable_tpa, int, 0);
87module_param(use_inta, int, 0); 87module_param(use_inta, int, 0);
88module_param(poll, int, 0); 88module_param(poll, int, 0);
89module_param(debug, int, 0); 89module_param(debug, int, 0);
90module_param(disable_tpa, int, 0); 90MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91module_param(nomcp, int, 0);
92MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); 91MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
93MODULE_PARM_DESC(poll, "use polling (for debug)"); 92MODULE_PARM_DESC(poll, "use polling (for debug)");
94MODULE_PARM_DESC(debug, "default debug msglevel"); 93MODULE_PARM_DESC(debug, "default debug msglevel");
95MODULE_PARM_DESC(nomcp, "ignore management CPU");
96 94
97#ifdef BNX2X_MULTI 95#ifdef BNX2X_MULTI
98module_param(use_multi, int, 0); 96module_param(use_multi, int, 0);
@@ -237,17 +235,16 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
237 while (*wb_comp != DMAE_COMP_VAL) { 235 while (*wb_comp != DMAE_COMP_VAL) {
238 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp); 236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
239 237
240 /* adjust delay for emulation/FPGA */
241 if (CHIP_REV_IS_SLOW(bp))
242 msleep(100);
243 else
244 udelay(5);
245
246 if (!cnt) { 238 if (!cnt) {
247 BNX2X_ERR("dmae timeout!\n"); 239 BNX2X_ERR("dmae timeout!\n");
248 break; 240 break;
249 } 241 }
250 cnt--; 242 cnt--;
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
245 msleep(100);
246 else
247 udelay(5);
251 } 248 }
252 249
253 mutex_unlock(&bp->dmae_mutex); 250 mutex_unlock(&bp->dmae_mutex);
@@ -310,17 +307,16 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
310 307
311 while (*wb_comp != DMAE_COMP_VAL) { 308 while (*wb_comp != DMAE_COMP_VAL) {
312 309
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
318
319 if (!cnt) { 310 if (!cnt) {
320 BNX2X_ERR("dmae timeout!\n"); 311 BNX2X_ERR("dmae timeout!\n");
321 break; 312 break;
322 } 313 }
323 cnt--; 314 cnt--;
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
317 msleep(100);
318 else
319 udelay(5);
324 } 320 }
325 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n", 321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
326 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1], 322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
@@ -503,6 +499,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
503 int i; 499 int i;
504 u16 j, start, end; 500 u16 j, start, end;
505 501
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
506 BNX2X_ERR("begin crash dump -----------------\n"); 505 BNX2X_ERR("begin crash dump -----------------\n");
507 506
508 for_each_queue(bp, i) { 507 for_each_queue(bp, i) {
@@ -513,17 +512,20 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
513 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
514 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
515 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
516 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)" 515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
517 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)" 516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
518 " rx_sge_prod(%x) last_max_sge(%x)\n", 517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
519 fp->rx_comp_prod, fp->rx_comp_cons, 518 fp->rx_bd_prod, fp->rx_bd_cons,
520 le16_to_cpu(*fp->rx_cons_sb), 519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
521 le16_to_cpu(*fp->rx_bd_cons_sb), 520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
522 fp->rx_sge_prod, fp->last_max_sge); 521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
523 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)" 522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
524 " bd data(%x,%x) rx_alloc_failed(%lx)\n", 523 " *sb_u_idx(%x) bd data(%x,%x)\n",
525 fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod, 524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
526 hw_prods->bds_prod, fp->rx_alloc_failed); 525 fp->status_blk->c_status_block.status_block_index,
526 fp->fp_u_idx,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
527 529
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245); 531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
@@ -553,8 +555,8 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
553 j, rx_bd[1], rx_bd[0], sw_bd->skb); 555 j, rx_bd[1], rx_bd[0], sw_bd->skb);
554 } 556 }
555 557
556 start = 0; 558 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE_CNT*NUM_RX_SGE_PAGES; 559 end = RX_SGE(fp->last_max_sge);
558 for (j = start; j < end; j++) { 560 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j]; 561 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j]; 562 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
@@ -582,9 +584,6 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
582 bnx2x_fw_dump(bp); 584 bnx2x_fw_dump(bp);
583 bnx2x_mc_assert(bp); 585 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n"); 586 BNX2X_ERR("end crash dump -----------------\n");
585
586 bp->stats_state = STATS_STATE_DISABLED;
587 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
588} 587}
589 588
590static void bnx2x_int_enable(struct bnx2x *bp) 589static void bnx2x_int_enable(struct bnx2x *bp)
@@ -684,7 +683,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp)
684static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 683static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
685 u8 storm, u16 index, u8 op, u8 update) 684 u8 storm, u16 index, u8 op, u8 update)
686{ 685{
687 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 686 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
687 COMMAND_REG_INT_ACK);
688 struct igu_ack_register igu_ack; 688 struct igu_ack_register igu_ack;
689 689
690 igu_ack.status_block_index = index; 690 igu_ack.status_block_index = index;
@@ -694,9 +694,9 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 694 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 695 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
696 696
697 DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n", 697 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
698 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); 698 (*(u32 *)&igu_ack), hc_addr);
699 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack)); 699 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
700} 700}
701 701
702static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 702static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -716,36 +716,15 @@ static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
716 return rc; 716 return rc;
717} 717}
718 718
719static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
720{
721 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
722
723 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
724 rx_cons_sb++;
725
726 if ((fp->rx_comp_cons != rx_cons_sb) ||
727 (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
728 (fp->tx_pkt_prod != fp->tx_pkt_cons))
729 return 1;
730
731 return 0;
732}
733
734static u16 bnx2x_ack_int(struct bnx2x *bp) 719static u16 bnx2x_ack_int(struct bnx2x *bp)
735{ 720{
736 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 721 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
737 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr); 722 COMMAND_REG_SIMD_MASK);
723 u32 result = REG_RD(bp, hc_addr);
738 724
739 DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n", 725 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
740 result, BAR_IGU_INTMEM + igu_addr); 726 result, hc_addr);
741 727
742#ifdef IGU_DEBUG
743#warning IGU_DEBUG active
744 if (result == 0) {
745 BNX2X_ERR("read %x from IGU\n", result);
746 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
747 }
748#endif
749 return result; 728 return result;
750} 729}
751 730
@@ -898,6 +877,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
898 netif_tx_lock(bp->dev); 877 netif_tx_lock(bp->dev);
899 878
900 if (netif_queue_stopped(bp->dev) && 879 if (netif_queue_stopped(bp->dev) &&
880 (bp->state == BNX2X_STATE_OPEN) &&
901 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
902 netif_wake_queue(bp->dev); 882 netif_wake_queue(bp->dev);
903 883
@@ -905,6 +885,7 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
905 } 885 }
906} 886}
907 887
888
908static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 889static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
909 union eth_rx_cqe *rr_cqe) 890 union eth_rx_cqe *rr_cqe)
910{ 891{
@@ -960,6 +941,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
960 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 941 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
961 break; 942 break;
962 943
944
963 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 945 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
964 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): 946 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
965 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 947 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
@@ -1169,8 +1151,8 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1169 memset(fp->sge_mask, 0xff, 1151 memset(fp->sge_mask, 0xff,
1170 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64)); 1152 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1171 1153
1172 /* Clear the two last indeces in the page to 1: 1154 /* Clear the two last indices in the page to 1:
1173 these are the indeces that correspond to the "next" element, 1155 these are the indices that correspond to the "next" element,
1174 hence will never be indicated and should be removed from 1156 hence will never be indicated and should be removed from
1175 the calculations. */ 1157 the calculations. */
1176 bnx2x_clear_sge_mask_next_elems(fp); 1158 bnx2x_clear_sge_mask_next_elems(fp);
@@ -1261,7 +1243,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1261 where we are and drop the whole packet */ 1243 where we are and drop the whole packet */
1262 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); 1244 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1263 if (unlikely(err)) { 1245 if (unlikely(err)) {
1264 fp->rx_alloc_failed++; 1246 bp->eth_stats.rx_skb_alloc_failed++;
1265 return err; 1247 return err;
1266 } 1248 }
1267 1249
@@ -1297,14 +1279,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1297 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), 1279 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1298 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); 1280 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1299 1281
1300 /* if alloc failed drop the packet and keep the buffer in the bin */
1301 if (likely(new_skb)) { 1282 if (likely(new_skb)) {
1283 /* fix ip xsum and give it to the stack */
1284 /* (no need to map the new skb) */
1302 1285
1303 prefetch(skb); 1286 prefetch(skb);
1304 prefetch(((char *)(skb)) + 128); 1287 prefetch(((char *)(skb)) + 128);
1305 1288
1306 /* else fix ip xsum and give it to the stack */
1307 /* (no need to map the new skb) */
1308#ifdef BNX2X_STOP_ON_ERROR 1289#ifdef BNX2X_STOP_ON_ERROR
1309 if (pad + len > bp->rx_buf_size) { 1290 if (pad + len > bp->rx_buf_size) {
1310 BNX2X_ERR("skb_put is about to fail... " 1291 BNX2X_ERR("skb_put is about to fail... "
@@ -1353,9 +1334,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1353 fp->tpa_pool[queue].skb = new_skb; 1334 fp->tpa_pool[queue].skb = new_skb;
1354 1335
1355 } else { 1336 } else {
1337 /* else drop the packet and keep the buffer in the bin */
1356 DP(NETIF_MSG_RX_STATUS, 1338 DP(NETIF_MSG_RX_STATUS,
1357 "Failed to allocate new skb - dropping packet!\n"); 1339 "Failed to allocate new skb - dropping packet!\n");
1358 fp->rx_alloc_failed++; 1340 bp->eth_stats.rx_skb_alloc_failed++;
1359 } 1341 }
1360 1342
1361 fp->tpa_state[queue] = BNX2X_TPA_STOP; 1343 fp->tpa_state[queue] = BNX2X_TPA_STOP;
@@ -1390,7 +1372,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1390 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; 1372 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1391 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod; 1373 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1392 int rx_pkt = 0; 1374 int rx_pkt = 0;
1393 u16 queue;
1394 1375
1395#ifdef BNX2X_STOP_ON_ERROR 1376#ifdef BNX2X_STOP_ON_ERROR
1396 if (unlikely(bp->panic)) 1377 if (unlikely(bp->panic))
@@ -1456,7 +1437,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1456 if ((!fp->disable_tpa) && 1437 if ((!fp->disable_tpa) &&
1457 (TPA_TYPE(cqe_fp_flags) != 1438 (TPA_TYPE(cqe_fp_flags) !=
1458 (TPA_TYPE_START | TPA_TYPE_END))) { 1439 (TPA_TYPE_START | TPA_TYPE_END))) {
1459 queue = cqe->fast_path_cqe.queue_index; 1440 u16 queue = cqe->fast_path_cqe.queue_index;
1460 1441
1461 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) { 1442 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1462 DP(NETIF_MSG_RX_STATUS, 1443 DP(NETIF_MSG_RX_STATUS,
@@ -1503,11 +1484,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1503 1484
1504 /* is this an error packet? */ 1485 /* is this an error packet? */
1505 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) { 1486 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1506 /* do we sometimes forward error packets anyway? */
1507 DP(NETIF_MSG_RX_ERR, 1487 DP(NETIF_MSG_RX_ERR,
1508 "ERROR flags %x rx packet %u\n", 1488 "ERROR flags %x rx packet %u\n",
1509 cqe_fp_flags, sw_comp_cons); 1489 cqe_fp_flags, sw_comp_cons);
1510 /* TBD make sure MC counts this as a drop */ 1490 bp->eth_stats.rx_err_discard_pkt++;
1511 goto reuse_rx; 1491 goto reuse_rx;
1512 } 1492 }
1513 1493
@@ -1524,7 +1504,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1524 DP(NETIF_MSG_RX_ERR, 1504 DP(NETIF_MSG_RX_ERR,
1525 "ERROR packet dropped " 1505 "ERROR packet dropped "
1526 "because of alloc failure\n"); 1506 "because of alloc failure\n");
1527 fp->rx_alloc_failed++; 1507 bp->eth_stats.rx_skb_alloc_failed++;
1528 goto reuse_rx; 1508 goto reuse_rx;
1529 } 1509 }
1530 1510
@@ -1550,7 +1530,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1550 DP(NETIF_MSG_RX_ERR, 1530 DP(NETIF_MSG_RX_ERR,
1551 "ERROR packet dropped because " 1531 "ERROR packet dropped because "
1552 "of alloc failure\n"); 1532 "of alloc failure\n");
1553 fp->rx_alloc_failed++; 1533 bp->eth_stats.rx_skb_alloc_failed++;
1554reuse_rx: 1534reuse_rx:
1555 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod); 1535 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1556 goto next_rx; 1536 goto next_rx;
@@ -1559,10 +1539,12 @@ reuse_rx:
1559 skb->protocol = eth_type_trans(skb, bp->dev); 1539 skb->protocol = eth_type_trans(skb, bp->dev);
1560 1540
1561 skb->ip_summed = CHECKSUM_NONE; 1541 skb->ip_summed = CHECKSUM_NONE;
1562 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe)) 1542 if (bp->rx_csum) {
1563 skb->ip_summed = CHECKSUM_UNNECESSARY; 1543 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1564 1544 skb->ip_summed = CHECKSUM_UNNECESSARY;
1565 /* TBD do we pass bad csum packets in promisc */ 1545 else
1546 bp->eth_stats.hw_csum_err++;
1547 }
1566 } 1548 }
1567 1549
1568#ifdef BCM_VLAN 1550#ifdef BCM_VLAN
@@ -1615,6 +1597,12 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1615 struct net_device *dev = bp->dev; 1597 struct net_device *dev = bp->dev;
1616 int index = FP_IDX(fp); 1598 int index = FP_IDX(fp);
1617 1599
1600 /* Return here if interrupt is disabled */
1601 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1602 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1603 return IRQ_HANDLED;
1604 }
1605
1618 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n", 1606 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1619 index, FP_SB_ID(fp)); 1607 index, FP_SB_ID(fp));
1620 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0); 1608 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1648,17 +1636,17 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1648 } 1636 }
1649 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status); 1637 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1650 1638
1651#ifdef BNX2X_STOP_ON_ERROR
1652 if (unlikely(bp->panic))
1653 return IRQ_HANDLED;
1654#endif
1655
1656 /* Return here if interrupt is disabled */ 1639 /* Return here if interrupt is disabled */
1657 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 1640 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1658 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 1641 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1659 return IRQ_HANDLED; 1642 return IRQ_HANDLED;
1660 } 1643 }
1661 1644
1645#ifdef BNX2X_STOP_ON_ERROR
1646 if (unlikely(bp->panic))
1647 return IRQ_HANDLED;
1648#endif
1649
1662 mask = 0x2 << bp->fp[0].sb_id; 1650 mask = 0x2 << bp->fp[0].sb_id;
1663 if (status & mask) { 1651 if (status & mask) {
1664 struct bnx2x_fastpath *fp = &bp->fp[0]; 1652 struct bnx2x_fastpath *fp = &bp->fp[0];
@@ -1699,11 +1687,12 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1699 * General service functions 1687 * General service functions
1700 */ 1688 */
1701 1689
1702static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) 1690static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1703{ 1691{
1704 u32 lock_status; 1692 u32 lock_status;
1705 u32 resource_bit = (1 << resource); 1693 u32 resource_bit = (1 << resource);
1706 u8 port = BP_PORT(bp); 1694 int func = BP_FUNC(bp);
1695 u32 hw_lock_control_reg;
1707 int cnt; 1696 int cnt;
1708 1697
1709 /* Validating that the resource is within range */ 1698 /* Validating that the resource is within range */
@@ -1714,8 +1703,15 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1714 return -EINVAL; 1703 return -EINVAL;
1715 } 1704 }
1716 1705
1706 if (func <= 5) {
1707 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1708 } else {
1709 hw_lock_control_reg =
1710 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1711 }
1712
1717 /* Validating that the resource is not already taken */ 1713 /* Validating that the resource is not already taken */
1718 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); 1714 lock_status = REG_RD(bp, hw_lock_control_reg);
1719 if (lock_status & resource_bit) { 1715 if (lock_status & resource_bit) {
1720 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", 1716 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1721 lock_status, resource_bit); 1717 lock_status, resource_bit);
@@ -1725,9 +1721,8 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1725 /* Try for 1 second every 5ms */ 1721 /* Try for 1 second every 5ms */
1726 for (cnt = 0; cnt < 200; cnt++) { 1722 for (cnt = 0; cnt < 200; cnt++) {
1727 /* Try to acquire the lock */ 1723 /* Try to acquire the lock */
1728 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4, 1724 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1729 resource_bit); 1725 lock_status = REG_RD(bp, hw_lock_control_reg);
1730 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1731 if (lock_status & resource_bit) 1726 if (lock_status & resource_bit)
1732 return 0; 1727 return 0;
1733 1728
@@ -1737,11 +1732,12 @@ static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1737 return -EAGAIN; 1732 return -EAGAIN;
1738} 1733}
1739 1734
1740static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) 1735static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1741{ 1736{
1742 u32 lock_status; 1737 u32 lock_status;
1743 u32 resource_bit = (1 << resource); 1738 u32 resource_bit = (1 << resource);
1744 u8 port = BP_PORT(bp); 1739 int func = BP_FUNC(bp);
1740 u32 hw_lock_control_reg;
1745 1741
1746 /* Validating that the resource is within range */ 1742 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1743 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
@@ -1751,20 +1747,27 @@ static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1751 return -EINVAL; 1747 return -EINVAL;
1752 } 1748 }
1753 1749
1750 if (func <= 5) {
1751 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1752 } else {
1753 hw_lock_control_reg =
1754 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1755 }
1756
1754 /* Validating that the resource is currently taken */ 1757 /* Validating that the resource is currently taken */
1755 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8); 1758 lock_status = REG_RD(bp, hw_lock_control_reg);
1756 if (!(lock_status & resource_bit)) { 1759 if (!(lock_status & resource_bit)) {
1757 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", 1760 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1758 lock_status, resource_bit); 1761 lock_status, resource_bit);
1759 return -EFAULT; 1762 return -EFAULT;
1760 } 1763 }
1761 1764
1762 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit); 1765 REG_WR(bp, hw_lock_control_reg, resource_bit);
1763 return 0; 1766 return 0;
1764} 1767}
1765 1768
1766/* HW Lock for shared dual port PHYs */ 1769/* HW Lock for shared dual port PHYs */
1767static void bnx2x_phy_hw_lock(struct bnx2x *bp) 1770static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1768{ 1771{
1769 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 1772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1770 1773
@@ -1772,25 +1775,25 @@ static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1772 1775
1773 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || 1776 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1774 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) 1777 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1775 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); 1778 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1776} 1779}
1777 1780
1778static void bnx2x_phy_hw_unlock(struct bnx2x *bp) 1781static void bnx2x_release_phy_lock(struct bnx2x *bp)
1779{ 1782{
1780 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config); 1783 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1781 1784
1782 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) || 1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1783 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)) 1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1784 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); 1787 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1785 1788
1786 mutex_unlock(&bp->port.phy_mutex); 1789 mutex_unlock(&bp->port.phy_mutex);
1787} 1790}
1788 1791
1789int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) 1792int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1790{ 1793{
1791 /* The GPIO should be swapped if swap register is set and active */ 1794 /* The GPIO should be swapped if swap register is set and active */
1792 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && 1795 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1793 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp); 1796 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1794 int gpio_shift = gpio_num + 1797 int gpio_shift = gpio_num +
1795 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); 1798 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1796 u32 gpio_mask = (1 << gpio_shift); 1799 u32 gpio_mask = (1 << gpio_shift);
@@ -1801,7 +1804,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1801 return -EINVAL; 1804 return -EINVAL;
1802 } 1805 }
1803 1806
1804 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); 1807 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1805 /* read GPIO and mask except the float bits */ 1808 /* read GPIO and mask except the float bits */
1806 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); 1809 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1807 1810
@@ -1822,7 +1825,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1822 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); 1825 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1823 break; 1826 break;
1824 1827
1825 case MISC_REGISTERS_GPIO_INPUT_HI_Z : 1828 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1826 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", 1829 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1827 gpio_num, gpio_shift); 1830 gpio_num, gpio_shift);
1828 /* set FLOAT */ 1831 /* set FLOAT */
@@ -1834,7 +1837,7 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1834 } 1837 }
1835 1838
1836 REG_WR(bp, MISC_REG_GPIO, gpio_reg); 1839 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1837 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO); 1840 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1838 1841
1839 return 0; 1842 return 0;
1840} 1843}
@@ -1850,19 +1853,19 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1850 return -EINVAL; 1853 return -EINVAL;
1851 } 1854 }
1852 1855
1853 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 1856 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1854 /* read SPIO and mask except the float bits */ 1857 /* read SPIO and mask except the float bits */
1855 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); 1858 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1856 1859
1857 switch (mode) { 1860 switch (mode) {
1858 case MISC_REGISTERS_SPIO_OUTPUT_LOW : 1861 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1859 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); 1862 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1860 /* clear FLOAT and set CLR */ 1863 /* clear FLOAT and set CLR */
1861 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 1864 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1862 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); 1865 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1863 break; 1866 break;
1864 1867
1865 case MISC_REGISTERS_SPIO_OUTPUT_HIGH : 1868 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1866 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); 1869 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1867 /* clear FLOAT and set SET */ 1870 /* clear FLOAT and set SET */
1868 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 1871 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
@@ -1880,7 +1883,7 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1880 } 1883 }
1881 1884
1882 REG_WR(bp, MISC_REG_SPIO, spio_reg); 1885 REG_WR(bp, MISC_REG_SPIO, spio_reg);
1883 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO); 1886 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1884 1887
1885 return 0; 1888 return 0;
1886} 1889}
@@ -1940,46 +1943,63 @@ static void bnx2x_link_report(struct bnx2x *bp)
1940 1943
1941static u8 bnx2x_initial_phy_init(struct bnx2x *bp) 1944static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1942{ 1945{
1943 u8 rc; 1946 if (!BP_NOMCP(bp)) {
1947 u8 rc;
1944 1948
1945 /* Initialize link parameters structure variables */ 1949 /* Initialize link parameters structure variables */
1946 bp->link_params.mtu = bp->dev->mtu; 1950 /* It is recommended to turn off RX FC for jumbo frames
1951 for better performance */
1952 if (IS_E1HMF(bp))
1953 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1954 else if (bp->dev->mtu > 5000)
1955 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1956 else
1957 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1947 1958
1948 bnx2x_phy_hw_lock(bp); 1959 bnx2x_acquire_phy_lock(bp);
1949 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1960 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1950 bnx2x_phy_hw_unlock(bp); 1961 bnx2x_release_phy_lock(bp);
1951 1962
1952 if (bp->link_vars.link_up) 1963 if (bp->link_vars.link_up)
1953 bnx2x_link_report(bp); 1964 bnx2x_link_report(bp);
1954 1965
1955 bnx2x_calc_fc_adv(bp); 1966 bnx2x_calc_fc_adv(bp);
1956 1967
1957 return rc; 1968 return rc;
1969 }
1970 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1971 return -EINVAL;
1958} 1972}
1959 1973
1960static void bnx2x_link_set(struct bnx2x *bp) 1974static void bnx2x_link_set(struct bnx2x *bp)
1961{ 1975{
1962 bnx2x_phy_hw_lock(bp); 1976 if (!BP_NOMCP(bp)) {
1963 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1977 bnx2x_acquire_phy_lock(bp);
1964 bnx2x_phy_hw_unlock(bp); 1978 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1979 bnx2x_release_phy_lock(bp);
1965 1980
1966 bnx2x_calc_fc_adv(bp); 1981 bnx2x_calc_fc_adv(bp);
1982 } else
1983 BNX2X_ERR("Bootcode is missing -not setting link\n");
1967} 1984}
1968 1985
1969static void bnx2x__link_reset(struct bnx2x *bp) 1986static void bnx2x__link_reset(struct bnx2x *bp)
1970{ 1987{
1971 bnx2x_phy_hw_lock(bp); 1988 if (!BP_NOMCP(bp)) {
1972 bnx2x_link_reset(&bp->link_params, &bp->link_vars); 1989 bnx2x_acquire_phy_lock(bp);
1973 bnx2x_phy_hw_unlock(bp); 1990 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1991 bnx2x_release_phy_lock(bp);
1992 } else
1993 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1974} 1994}
1975 1995
1976static u8 bnx2x_link_test(struct bnx2x *bp) 1996static u8 bnx2x_link_test(struct bnx2x *bp)
1977{ 1997{
1978 u8 rc; 1998 u8 rc;
1979 1999
1980 bnx2x_phy_hw_lock(bp); 2000 bnx2x_acquire_phy_lock(bp);
1981 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); 2001 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1982 bnx2x_phy_hw_unlock(bp); 2002 bnx2x_release_phy_lock(bp);
1983 2003
1984 return rc; 2004 return rc;
1985} 2005}
@@ -1991,7 +2011,7 @@ static u8 bnx2x_link_test(struct bnx2x *bp)
1991 sum of vn_min_rates 2011 sum of vn_min_rates
1992 or 2012 or
1993 0 - if all the min_rates are 0. 2013 0 - if all the min_rates are 0.
1994 In the later case fainess algorithm should be deactivated. 2014 In the later case fairness algorithm should be deactivated.
1995 If not all min_rates are zero then those that are zeroes will 2015 If not all min_rates are zero then those that are zeroes will
1996 be set to 1. 2016 be set to 1.
1997 */ 2017 */
@@ -2114,7 +2134,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2114 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2134 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2115 /* If FAIRNESS is enabled (not all min rates are zeroes) and 2135 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2116 if current min rate is zero - set it to 1. 2136 if current min rate is zero - set it to 1.
2117 This is a requirment of the algorithm. */ 2137 This is a requirement of the algorithm. */
2118 if ((vn_min_rate == 0) && wsum) 2138 if ((vn_min_rate == 0) && wsum)
2119 vn_min_rate = DEF_MIN_RATE; 2139 vn_min_rate = DEF_MIN_RATE;
2120 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 2140 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
@@ -2203,9 +2223,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2203 /* Make sure that we are synced with the current statistics */ 2223 /* Make sure that we are synced with the current statistics */
2204 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2224 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2205 2225
2206 bnx2x_phy_hw_lock(bp); 2226 bnx2x_acquire_phy_lock(bp);
2207 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2227 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2208 bnx2x_phy_hw_unlock(bp); 2228 bnx2x_release_phy_lock(bp);
2209 2229
2210 if (bp->link_vars.link_up) { 2230 if (bp->link_vars.link_up) {
2211 2231
@@ -2357,7 +2377,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2357} 2377}
2358 2378
2359/* acquire split MCP access lock register */ 2379/* acquire split MCP access lock register */
2360static int bnx2x_lock_alr(struct bnx2x *bp) 2380static int bnx2x_acquire_alr(struct bnx2x *bp)
2361{ 2381{
2362 u32 i, j, val; 2382 u32 i, j, val;
2363 int rc = 0; 2383 int rc = 0;
@@ -2374,15 +2394,15 @@ static int bnx2x_lock_alr(struct bnx2x *bp)
2374 msleep(5); 2394 msleep(5);
2375 } 2395 }
2376 if (!(val & (1L << 31))) { 2396 if (!(val & (1L << 31))) {
2377 BNX2X_ERR("Cannot acquire nvram interface\n"); 2397 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2378 rc = -EBUSY; 2398 rc = -EBUSY;
2379 } 2399 }
2380 2400
2381 return rc; 2401 return rc;
2382} 2402}
2383 2403
2384/* Release split MCP access lock register */ 2404/* release split MCP access lock register */
2385static void bnx2x_unlock_alr(struct bnx2x *bp) 2405static void bnx2x_release_alr(struct bnx2x *bp)
2386{ 2406{
2387 u32 val = 0; 2407 u32 val = 0;
2388 2408
@@ -2395,7 +2415,6 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2395 u16 rc = 0; 2415 u16 rc = 0;
2396 2416
2397 barrier(); /* status block is written to by the chip */ 2417 barrier(); /* status block is written to by the chip */
2398
2399 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) { 2418 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2400 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index; 2419 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2401 rc |= 1; 2420 rc |= 1;
@@ -2426,26 +2445,31 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2426static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) 2445static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2427{ 2446{
2428 int port = BP_PORT(bp); 2447 int port = BP_PORT(bp);
2429 int func = BP_FUNC(bp); 2448 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2430 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8; 2449 COMMAND_REG_ATTN_BITS_SET);
2431 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2450 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2432 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2451 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2433 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : 2452 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2434 NIG_REG_MASK_INTERRUPT_PORT0; 2453 NIG_REG_MASK_INTERRUPT_PORT0;
2454 u32 aeu_mask;
2435 2455
2436 if (~bp->aeu_mask & (asserted & 0xff))
2437 BNX2X_ERR("IGU ERROR\n");
2438 if (bp->attn_state & asserted) 2456 if (bp->attn_state & asserted)
2439 BNX2X_ERR("IGU ERROR\n"); 2457 BNX2X_ERR("IGU ERROR\n");
2440 2458
2459 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2460 aeu_mask = REG_RD(bp, aeu_addr);
2461
2441 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 2462 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2442 bp->aeu_mask, asserted); 2463 aeu_mask, asserted);
2443 bp->aeu_mask &= ~(asserted & 0xff); 2464 aeu_mask &= ~(asserted & 0xff);
2444 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask); 2465 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2445 2466
2446 REG_WR(bp, aeu_addr, bp->aeu_mask); 2467 REG_WR(bp, aeu_addr, aeu_mask);
2468 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2447 2469
2470 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2448 bp->attn_state |= asserted; 2471 bp->attn_state |= asserted;
2472 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2449 2473
2450 if (asserted & ATTN_HARD_WIRED_MASK) { 2474 if (asserted & ATTN_HARD_WIRED_MASK) {
2451 if (asserted & ATTN_NIG_FOR_FUNC) { 2475 if (asserted & ATTN_NIG_FOR_FUNC) {
@@ -2500,9 +2524,9 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2500 2524
2501 } /* if hardwired */ 2525 } /* if hardwired */
2502 2526
2503 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n", 2527 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2504 asserted, BAR_IGU_INTMEM + igu_addr); 2528 asserted, hc_addr);
2505 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted); 2529 REG_WR(bp, hc_addr, asserted);
2506 2530
2507 /* now set back the mask */ 2531 /* now set back the mask */
2508 if (asserted & ATTN_NIG_FOR_FUNC) 2532 if (asserted & ATTN_NIG_FOR_FUNC)
@@ -2530,12 +2554,12 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2530 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: 2554 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2531 /* Fan failure attention */ 2555 /* Fan failure attention */
2532 2556
2533 /* The PHY reset is controled by GPIO 1 */ 2557 /* The PHY reset is controlled by GPIO 1 */
2534 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 2558 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2535 MISC_REGISTERS_GPIO_OUTPUT_LOW); 2559 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2536 /* Low power mode is controled by GPIO 2 */ 2560 /* Low power mode is controlled by GPIO 2 */
2537 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 2561 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2538 MISC_REGISTERS_GPIO_OUTPUT_LOW); 2562 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2539 /* mark the failure */ 2563 /* mark the failure */
2540 bp->link_params.ext_phy_config &= 2564 bp->link_params.ext_phy_config &=
2541 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; 2565 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
@@ -2699,10 +2723,11 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2699 int index; 2723 int index;
2700 u32 reg_addr; 2724 u32 reg_addr;
2701 u32 val; 2725 u32 val;
2726 u32 aeu_mask;
2702 2727
2703 /* need to take HW lock because MCP or other port might also 2728 /* need to take HW lock because MCP or other port might also
2704 try to handle this event */ 2729 try to handle this event */
2705 bnx2x_lock_alr(bp); 2730 bnx2x_acquire_alr(bp);
2706 2731
2707 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 2732 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2708 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 2733 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
@@ -2734,32 +2759,35 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2734 HW_PRTY_ASSERT_SET_1) || 2759 HW_PRTY_ASSERT_SET_1) ||
2735 (attn.sig[2] & group_mask.sig[2] & 2760 (attn.sig[2] & group_mask.sig[2] &
2736 HW_PRTY_ASSERT_SET_2)) 2761 HW_PRTY_ASSERT_SET_2))
2737 BNX2X_ERR("FATAL HW block parity attention\n"); 2762 BNX2X_ERR("FATAL HW block parity attention\n");
2738 } 2763 }
2739 } 2764 }
2740 2765
2741 bnx2x_unlock_alr(bp); 2766 bnx2x_release_alr(bp);
2742 2767
2743 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8; 2768 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2744 2769
2745 val = ~deasserted; 2770 val = ~deasserted;
2746/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n", 2771 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2747 val, BAR_IGU_INTMEM + reg_addr); */ 2772 val, reg_addr);
2748 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val); 2773 REG_WR(bp, reg_addr, val);
2749 2774
2750 if (bp->aeu_mask & (deasserted & 0xff))
2751 BNX2X_ERR("IGU BUG!\n");
2752 if (~bp->attn_state & deasserted) 2775 if (~bp->attn_state & deasserted)
2753 BNX2X_ERR("IGU BUG!\n"); 2776 BNX2X_ERR("IGU ERROR\n");
2754 2777
2755 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : 2778 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2756 MISC_REG_AEU_MASK_ATTN_FUNC_0; 2779 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2757 2780
2758 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask); 2781 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2759 bp->aeu_mask |= (deasserted & 0xff); 2782 aeu_mask = REG_RD(bp, reg_addr);
2783
2784 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2785 aeu_mask, deasserted);
2786 aeu_mask |= (deasserted & 0xff);
2787 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2760 2788
2761 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask); 2789 REG_WR(bp, reg_addr, aeu_mask);
2762 REG_WR(bp, reg_addr, bp->aeu_mask); 2790 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 2791
2764 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state); 2792 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2765 bp->attn_state &= ~deasserted; 2793 bp->attn_state &= ~deasserted;
@@ -2800,7 +2828,7 @@ static void bnx2x_sp_task(struct work_struct *work)
2800 2828
2801 /* Return here if interrupt is disabled */ 2829 /* Return here if interrupt is disabled */
2802 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 2830 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2803 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); 2831 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2804 return; 2832 return;
2805 } 2833 }
2806 2834
@@ -2808,7 +2836,7 @@ static void bnx2x_sp_task(struct work_struct *work)
2808/* if (status == 0) */ 2836/* if (status == 0) */
2809/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 2837/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2810 2838
2811 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status); 2839 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2812 2840
2813 /* HW attentions */ 2841 /* HW attentions */
2814 if (status & 0x1) 2842 if (status & 0x1)
@@ -2838,7 +2866,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2838 2866
2839 /* Return here if interrupt is disabled */ 2867 /* Return here if interrupt is disabled */
2840 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 2868 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2841 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); 2869 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2842 return IRQ_HANDLED; 2870 return IRQ_HANDLED;
2843 } 2871 }
2844 2872
@@ -2876,11 +2904,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2876 /* underflow */ \ 2904 /* underflow */ \
2877 d_hi = m_hi - s_hi; \ 2905 d_hi = m_hi - s_hi; \
2878 if (d_hi > 0) { \ 2906 if (d_hi > 0) { \
2879 /* we can 'loan' 1 */ \ 2907 /* we can 'loan' 1 */ \
2880 d_hi--; \ 2908 d_hi--; \
2881 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \ 2909 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2882 } else { \ 2910 } else { \
2883 /* m_hi <= s_hi */ \ 2911 /* m_hi <= s_hi */ \
2884 d_hi = 0; \ 2912 d_hi = 0; \
2885 d_lo = 0; \ 2913 d_lo = 0; \
2886 } \ 2914 } \
@@ -2890,7 +2918,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2890 d_hi = 0; \ 2918 d_hi = 0; \
2891 d_lo = 0; \ 2919 d_lo = 0; \
2892 } else { \ 2920 } else { \
2893 /* m_hi >= s_hi */ \ 2921 /* m_hi >= s_hi */ \
2894 d_hi = m_hi - s_hi; \ 2922 d_hi = m_hi - s_hi; \
2895 d_lo = m_lo - s_lo; \ 2923 d_lo = m_lo - s_lo; \
2896 } \ 2924 } \
@@ -2963,37 +2991,6 @@ static inline long bnx2x_hilo(u32 *hiref)
2963 * Init service functions 2991 * Init service functions
2964 */ 2992 */
2965 2993
2966static void bnx2x_storm_stats_init(struct bnx2x *bp)
2967{
2968 int func = BP_FUNC(bp);
2969
2970 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2971 REG_WR(bp, BAR_XSTRORM_INTMEM +
2972 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2973
2974 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2975 REG_WR(bp, BAR_TSTRORM_INTMEM +
2976 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2977
2978 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2979 REG_WR(bp, BAR_CSTRORM_INTMEM +
2980 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2981
2982 REG_WR(bp, BAR_XSTRORM_INTMEM +
2983 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2984 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2985 REG_WR(bp, BAR_XSTRORM_INTMEM +
2986 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2987 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2988
2989 REG_WR(bp, BAR_TSTRORM_INTMEM +
2990 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2991 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2992 REG_WR(bp, BAR_TSTRORM_INTMEM +
2993 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2994 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2995}
2996
2997static void bnx2x_storm_stats_post(struct bnx2x *bp) 2994static void bnx2x_storm_stats_post(struct bnx2x *bp)
2998{ 2995{
2999 if (!bp->stats_pending) { 2996 if (!bp->stats_pending) {
@@ -3032,6 +3029,8 @@ static void bnx2x_stats_init(struct bnx2x *bp)
3032 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); 3029 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3033 bp->port.old_nig_stats.brb_discard = 3030 bp->port.old_nig_stats.brb_discard =
3034 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); 3031 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032 bp->port.old_nig_stats.brb_truncate =
3033 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3035 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, 3034 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3036 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); 3035 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3037 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, 3036 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
@@ -3101,12 +3100,12 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
3101 3100
3102 might_sleep(); 3101 might_sleep();
3103 while (*stats_comp != DMAE_COMP_VAL) { 3102 while (*stats_comp != DMAE_COMP_VAL) {
3104 msleep(1);
3105 if (!cnt) { 3103 if (!cnt) {
3106 BNX2X_ERR("timeout waiting for stats finished\n"); 3104 BNX2X_ERR("timeout waiting for stats finished\n");
3107 break; 3105 break;
3108 } 3106 }
3109 cnt--; 3107 cnt--;
3108 msleep(1);
3110 } 3109 }
3111 return 1; 3110 return 1;
3112} 3111}
@@ -3451,8 +3450,7 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3451 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); 3450 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3452 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); 3451 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3453 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); 3452 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf); 3453 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3455 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3456 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); 3454 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3457 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived); 3455 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3458 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); 3456 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
@@ -3536,6 +3534,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
3536 3534
3537 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, 3535 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3538 new->brb_discard - old->brb_discard); 3536 new->brb_discard - old->brb_discard);
3537 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538 new->brb_truncate - old->brb_truncate);
3539 3539
3540 UPDATE_STAT64_NIG(egress_mac_pkt0, 3540 UPDATE_STAT64_NIG(egress_mac_pkt0,
3541 etherstatspkts1024octetsto1522octets); 3541 etherstatspkts1024octetsto1522octets);
@@ -3713,8 +3713,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
3713 nstats->rx_length_errors = 3713 nstats->rx_length_errors =
3714 estats->rx_stat_etherstatsundersizepkts_lo + 3714 estats->rx_stat_etherstatsundersizepkts_lo +
3715 estats->jabber_packets_received; 3715 estats->jabber_packets_received;
3716 nstats->rx_over_errors = estats->brb_drop_lo + 3716 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717 estats->brb_truncate_discard;
3718 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo; 3717 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3719 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo; 3718 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3720 nstats->rx_fifo_errors = old_tclient->no_buff_discard; 3719 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
@@ -3783,7 +3782,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
3783 bp->fp->rx_comp_cons), 3782 bp->fp->rx_comp_cons),
3784 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets); 3783 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3785 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n", 3784 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3786 netif_queue_stopped(bp->dev)? "Xoff" : "Xon", 3785 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3787 estats->driver_xoff, estats->brb_drop_lo); 3786 estats->driver_xoff, estats->brb_drop_lo);
3788 printk(KERN_DEBUG "tstats: checksum_discard %u " 3787 printk(KERN_DEBUG "tstats: checksum_discard %u "
3789 "packets_too_big_discard %u no_buff_discard %u " 3788 "packets_too_big_discard %u no_buff_discard %u "
@@ -3994,14 +3993,14 @@ static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3994 3993
3995 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM + 3994 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3996 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3995 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3997 sizeof(struct ustorm_def_status_block)/4); 3996 sizeof(struct ustorm_status_block)/4);
3998 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM + 3997 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3999 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0, 3998 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4000 sizeof(struct cstorm_def_status_block)/4); 3999 sizeof(struct cstorm_status_block)/4);
4001} 4000}
4002 4001
4003static void bnx2x_init_sb(struct bnx2x *bp, int sb_id, 4002static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4004 struct host_status_block *sb, dma_addr_t mapping) 4003 dma_addr_t mapping, int sb_id)
4005{ 4004{
4006 int port = BP_PORT(bp); 4005 int port = BP_PORT(bp);
4007 int func = BP_FUNC(bp); 4006 int func = BP_FUNC(bp);
@@ -4077,7 +4076,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4077 atten_status_block); 4076 atten_status_block);
4078 def_sb->atten_status_block.status_block_id = sb_id; 4077 def_sb->atten_status_block.status_block_id = sb_id;
4079 4078
4080 bp->def_att_idx = 0;
4081 bp->attn_state = 0; 4079 bp->attn_state = 0;
4082 4080
4083 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 4081 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -4094,9 +4092,6 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4094 reg_offset + 0xc + 0x10*index); 4092 reg_offset + 0xc + 0x10*index);
4095 } 4093 }
4096 4094
4097 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4098 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4099
4100 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L : 4095 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4101 HC_REG_ATTN_MSG0_ADDR_L); 4096 HC_REG_ATTN_MSG0_ADDR_L);
4102 4097
@@ -4114,17 +4109,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4114 u_def_status_block); 4109 u_def_status_block);
4115 def_sb->u_def_status_block.status_block_id = sb_id; 4110 def_sb->u_def_status_block.status_block_id = sb_id;
4116 4111
4117 bp->def_u_idx = 0;
4118
4119 REG_WR(bp, BAR_USTRORM_INTMEM + 4112 REG_WR(bp, BAR_USTRORM_INTMEM +
4120 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4113 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4121 REG_WR(bp, BAR_USTRORM_INTMEM + 4114 REG_WR(bp, BAR_USTRORM_INTMEM +
4122 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4115 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4123 U64_HI(section)); 4116 U64_HI(section));
4124 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF + 4117 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4125 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4118 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4126 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4127 BNX2X_BTR);
4128 4119
4129 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++) 4120 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4130 REG_WR16(bp, BAR_USTRORM_INTMEM + 4121 REG_WR16(bp, BAR_USTRORM_INTMEM +
@@ -4135,17 +4126,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4135 c_def_status_block); 4126 c_def_status_block);
4136 def_sb->c_def_status_block.status_block_id = sb_id; 4127 def_sb->c_def_status_block.status_block_id = sb_id;
4137 4128
4138 bp->def_c_idx = 0;
4139
4140 REG_WR(bp, BAR_CSTRORM_INTMEM + 4129 REG_WR(bp, BAR_CSTRORM_INTMEM +
4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4130 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4142 REG_WR(bp, BAR_CSTRORM_INTMEM + 4131 REG_WR(bp, BAR_CSTRORM_INTMEM +
4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4132 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4144 U64_HI(section)); 4133 U64_HI(section));
4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF + 4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4135 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4147 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4148 BNX2X_BTR);
4149 4136
4150 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++) 4137 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4151 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4138 REG_WR16(bp, BAR_CSTRORM_INTMEM +
@@ -4156,17 +4143,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4156 t_def_status_block); 4143 t_def_status_block);
4157 def_sb->t_def_status_block.status_block_id = sb_id; 4144 def_sb->t_def_status_block.status_block_id = sb_id;
4158 4145
4159 bp->def_t_idx = 0;
4160
4161 REG_WR(bp, BAR_TSTRORM_INTMEM + 4146 REG_WR(bp, BAR_TSTRORM_INTMEM +
4162 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4147 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4163 REG_WR(bp, BAR_TSTRORM_INTMEM + 4148 REG_WR(bp, BAR_TSTRORM_INTMEM +
4164 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4149 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4165 U64_HI(section)); 4150 U64_HI(section));
4166 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF + 4151 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4167 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4152 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4168 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4169 BNX2X_BTR);
4170 4153
4171 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++) 4154 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4172 REG_WR16(bp, BAR_TSTRORM_INTMEM + 4155 REG_WR16(bp, BAR_TSTRORM_INTMEM +
@@ -4177,23 +4160,20 @@ static void bnx2x_init_def_sb(struct bnx2x *bp,
4177 x_def_status_block); 4160 x_def_status_block);
4178 def_sb->x_def_status_block.status_block_id = sb_id; 4161 def_sb->x_def_status_block.status_block_id = sb_id;
4179 4162
4180 bp->def_x_idx = 0;
4181
4182 REG_WR(bp, BAR_XSTRORM_INTMEM + 4163 REG_WR(bp, BAR_XSTRORM_INTMEM +
4183 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section)); 4164 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4184 REG_WR(bp, BAR_XSTRORM_INTMEM + 4165 REG_WR(bp, BAR_XSTRORM_INTMEM +
4185 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4), 4166 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4186 U64_HI(section)); 4167 U64_HI(section));
4187 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF + 4168 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4188 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func); 4169 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4189 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4190 BNX2X_BTR);
4191 4170
4192 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++) 4171 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_XSTRORM_INTMEM + 4172 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4194 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1); 4173 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4195 4174
4196 bp->stats_pending = 0; 4175 bp->stats_pending = 0;
4176 bp->set_mac_pending = 0;
4197 4177
4198 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); 4178 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4199} 4179}
@@ -4209,21 +4189,25 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 /* HC_INDEX_U_ETH_RX_CQ_CONS */ 4189 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4210 REG_WR8(bp, BAR_USTRORM_INTMEM + 4190 REG_WR8(bp, BAR_USTRORM_INTMEM +
4211 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4191 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4212 HC_INDEX_U_ETH_RX_CQ_CONS), 4192 U_SB_ETH_RX_CQ_INDEX),
4213 bp->rx_ticks/12); 4193 bp->rx_ticks/12);
4214 REG_WR16(bp, BAR_USTRORM_INTMEM + 4194 REG_WR16(bp, BAR_USTRORM_INTMEM +
4215 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4195 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4216 HC_INDEX_U_ETH_RX_CQ_CONS), 4196 U_SB_ETH_RX_CQ_INDEX),
4197 bp->rx_ticks ? 0 : 1);
4198 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200 U_SB_ETH_RX_BD_INDEX),
4217 bp->rx_ticks ? 0 : 1); 4201 bp->rx_ticks ? 0 : 1);
4218 4202
4219 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4203 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4220 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4204 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4221 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id, 4205 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4222 HC_INDEX_C_ETH_TX_CQ_CONS), 4206 C_SB_ETH_TX_CQ_INDEX),
4223 bp->tx_ticks/12); 4207 bp->tx_ticks/12);
4224 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4208 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4225 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, 4209 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4226 HC_INDEX_C_ETH_TX_CQ_CONS), 4210 C_SB_ETH_TX_CQ_INDEX),
4227 bp->tx_ticks ? 0 : 1); 4211 bp->tx_ticks ? 0 : 1);
4228 } 4212 }
4229} 4213}
@@ -4256,7 +4240,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256static void bnx2x_init_rx_rings(struct bnx2x *bp) 4240static void bnx2x_init_rx_rings(struct bnx2x *bp)
4257{ 4241{
4258 int func = BP_FUNC(bp); 4242 int func = BP_FUNC(bp);
4259 u16 ring_prod, cqe_ring_prod = 0; 4243 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244 ETH_MAX_AGGREGATION_QUEUES_E1H;
4245 u16 ring_prod, cqe_ring_prod;
4260 int i, j; 4246 int i, j;
4261 4247
4262 bp->rx_buf_use_size = bp->dev->mtu; 4248 bp->rx_buf_use_size = bp->dev->mtu;
@@ -4270,9 +4256,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4270 bp->dev->mtu + ETH_OVREHEAD); 4256 bp->dev->mtu + ETH_OVREHEAD);
4271 4257
4272 for_each_queue(bp, j) { 4258 for_each_queue(bp, j) {
4273 for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) { 4259 struct bnx2x_fastpath *fp = &bp->fp[j];
4274 struct bnx2x_fastpath *fp = &bp->fp[j];
4275 4260
4261 for (i = 0; i < max_agg_queues; i++) {
4276 fp->tpa_pool[i].skb = 4262 fp->tpa_pool[i].skb =
4277 netdev_alloc_skb(bp->dev, bp->rx_buf_size); 4263 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4278 if (!fp->tpa_pool[i].skb) { 4264 if (!fp->tpa_pool[i].skb) {
@@ -4352,8 +4338,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4352 BNX2X_ERR("disabling TPA for queue[%d]\n", j); 4338 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4353 /* Cleanup already allocated elements */ 4339 /* Cleanup already allocated elements */
4354 bnx2x_free_rx_sge_range(bp, fp, ring_prod); 4340 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4355 bnx2x_free_tpa_pool(bp, fp, 4341 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4356 ETH_MAX_AGGREGATION_QUEUES_E1H);
4357 fp->disable_tpa = 1; 4342 fp->disable_tpa = 1;
4358 ring_prod = 0; 4343 ring_prod = 0;
4359 break; 4344 break;
@@ -4363,13 +4348,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4363 fp->rx_sge_prod = ring_prod; 4348 fp->rx_sge_prod = ring_prod;
4364 4349
4365 /* Allocate BDs and initialize BD ring */ 4350 /* Allocate BDs and initialize BD ring */
4366 fp->rx_comp_cons = fp->rx_alloc_failed = 0; 4351 fp->rx_comp_cons = 0;
4367 cqe_ring_prod = ring_prod = 0; 4352 cqe_ring_prod = ring_prod = 0;
4368 for (i = 0; i < bp->rx_ring_size; i++) { 4353 for (i = 0; i < bp->rx_ring_size; i++) {
4369 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) { 4354 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4370 BNX2X_ERR("was only able to allocate " 4355 BNX2X_ERR("was only able to allocate "
4371 "%d rx skbs\n", i); 4356 "%d rx skbs\n", i);
4372 fp->rx_alloc_failed++; 4357 bp->eth_stats.rx_skb_alloc_failed++;
4373 break; 4358 break;
4374 } 4359 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4360 ring_prod = NEXT_RX_IDX(ring_prod);
@@ -4497,7 +4482,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4497 } 4482 }
4498 4483
4499 context->cstorm_st_context.sb_index_number = 4484 context->cstorm_st_context.sb_index_number =
4500 HC_INDEX_C_ETH_TX_CQ_CONS; 4485 C_SB_ETH_TX_CQ_INDEX;
4501 context->cstorm_st_context.status_block_id = sb_id; 4486 context->cstorm_st_context.status_block_id = sb_id;
4502 4487
4503 context->xstorm_ag_context.cdu_reserved = 4488 context->xstorm_ag_context.cdu_reserved =
@@ -4535,7 +4520,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4535 int i; 4520 int i;
4536 4521
4537 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4522 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4538 tstorm_client.statistics_counter_id = 0; 4523 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4539 tstorm_client.config_flags = 4524 tstorm_client.config_flags =
4540 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4525 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4541#ifdef BCM_VLAN 4526#ifdef BCM_VLAN
@@ -4579,7 +4564,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4579 int func = BP_FUNC(bp); 4564 int func = BP_FUNC(bp);
4580 int i; 4565 int i;
4581 4566
4582 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode); 4567 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4583 4568
4584 switch (mode) { 4569 switch (mode) {
4585 case BNX2X_RX_MODE_NONE: /* no Rx */ 4570 case BNX2X_RX_MODE_NONE: /* no Rx */
@@ -4617,13 +4602,35 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4617 bnx2x_set_client_config(bp); 4602 bnx2x_set_client_config(bp);
4618} 4603}
4619 4604
4620static void bnx2x_init_internal(struct bnx2x *bp) 4605static void bnx2x_init_internal_common(struct bnx2x *bp)
4606{
4607 int i;
4608
4609 /* Zero this manually as its initialization is
4610 currently missing in the initTool */
4611 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612 REG_WR(bp, BAR_USTRORM_INTMEM +
4613 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4614}
4615
4616static void bnx2x_init_internal_port(struct bnx2x *bp)
4617{
4618 int port = BP_PORT(bp);
4619
4620 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4624}
4625
4626static void bnx2x_init_internal_func(struct bnx2x *bp)
4621{ 4627{
4622 struct tstorm_eth_function_common_config tstorm_config = {0}; 4628 struct tstorm_eth_function_common_config tstorm_config = {0};
4623 struct stats_indication_flags stats_flags = {0}; 4629 struct stats_indication_flags stats_flags = {0};
4624 int port = BP_PORT(bp); 4630 int port = BP_PORT(bp);
4625 int func = BP_FUNC(bp); 4631 int func = BP_FUNC(bp);
4626 int i; 4632 int i;
4633 u16 max_agg_size;
4627 4634
4628 if (is_multi(bp)) { 4635 if (is_multi(bp)) {
4629 tstorm_config.config_flags = MULTI_FLAGS; 4636 tstorm_config.config_flags = MULTI_FLAGS;
@@ -4636,31 +4643,53 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4636 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func), 4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4637 (*(u32 *)&tstorm_config)); 4644 (*(u32 *)&tstorm_config));
4638 4645
4639/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4640 (*(u32 *)&tstorm_config)); */
4641
4642 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 4646 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4643 bnx2x_set_storm_rx_mode(bp); 4647 bnx2x_set_storm_rx_mode(bp);
4644 4648
4649 /* reset xstorm per client statistics */
4650 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4653 i*4, 0);
4654 }
4655 /* reset tstorm per client statistics */
4656 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4659 i*4, 0);
4660 }
4661
4662 /* Init statistics related context */
4645 stats_flags.collect_eth = 1; 4663 stats_flags.collect_eth = 1;
4646 4664
4647 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 4665 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4648 ((u32 *)&stats_flags)[0]); 4666 ((u32 *)&stats_flags)[0]);
4649 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4, 4667 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4650 ((u32 *)&stats_flags)[1]); 4668 ((u32 *)&stats_flags)[1]);
4651 4669
4652 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 4670 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4653 ((u32 *)&stats_flags)[0]); 4671 ((u32 *)&stats_flags)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4, 4672 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4655 ((u32 *)&stats_flags)[1]); 4673 ((u32 *)&stats_flags)[1]);
4656 4674
4657 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 4675 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4658 ((u32 *)&stats_flags)[0]); 4676 ((u32 *)&stats_flags)[0]);
4659 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4, 4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4660 ((u32 *)&stats_flags)[1]); 4678 ((u32 *)&stats_flags)[1]);
4661 4679
4662/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n", 4680 REG_WR(bp, BAR_XSTRORM_INTMEM +
4663 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */ 4681 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683 REG_WR(bp, BAR_XSTRORM_INTMEM +
4684 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686
4687 REG_WR(bp, BAR_TSTRORM_INTMEM +
4688 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690 REG_WR(bp, BAR_TSTRORM_INTMEM +
4691 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4664 4693
4665 if (CHIP_IS_E1H(bp)) { 4694 if (CHIP_IS_E1H(bp)) {
4666 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET, 4695 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
@@ -4676,15 +4705,12 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4676 bp->e1hov); 4705 bp->e1hov);
4677 } 4706 }
4678 4707
4679 /* Zero this manualy as its initialization is 4708 /* Init CQ ring mapping and aggregation size */
4680 currently missing in the initTool */ 4709 max_agg_size = min((u32)(bp->rx_buf_use_size +
4681 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++) 4710 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4682 REG_WR(bp, BAR_USTRORM_INTMEM + 4711 (u32)0xffff);
4683 USTORM_AGG_DATA_OFFSET + 4*i, 0);
4684
4685 for_each_queue(bp, i) { 4712 for_each_queue(bp, i) {
4686 struct bnx2x_fastpath *fp = &bp->fp[i]; 4713 struct bnx2x_fastpath *fp = &bp->fp[i];
4687 u16 max_agg_size;
4688 4714
4689 REG_WR(bp, BAR_USTRORM_INTMEM + 4715 REG_WR(bp, BAR_USTRORM_INTMEM +
4690 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)), 4716 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
@@ -4693,16 +4719,34 @@ static void bnx2x_init_internal(struct bnx2x *bp)
4693 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4, 4719 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4694 U64_HI(fp->rx_comp_mapping)); 4720 U64_HI(fp->rx_comp_mapping));
4695 4721
4696 max_agg_size = min((u32)(bp->rx_buf_use_size +
4697 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4698 (u32)0xffff);
4699 REG_WR16(bp, BAR_USTRORM_INTMEM + 4722 REG_WR16(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)), 4723 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4701 max_agg_size); 4724 max_agg_size);
4702 } 4725 }
4703} 4726}
4704 4727
4705static void bnx2x_nic_init(struct bnx2x *bp) 4728static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4729{
4730 switch (load_code) {
4731 case FW_MSG_CODE_DRV_LOAD_COMMON:
4732 bnx2x_init_internal_common(bp);
4733 /* no break */
4734
4735 case FW_MSG_CODE_DRV_LOAD_PORT:
4736 bnx2x_init_internal_port(bp);
4737 /* no break */
4738
4739 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740 bnx2x_init_internal_func(bp);
4741 break;
4742
4743 default:
4744 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4745 break;
4746 }
4747}
4748
4749static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4706{ 4750{
4707 int i; 4751 int i;
4708 4752
@@ -4717,19 +4761,20 @@ static void bnx2x_nic_init(struct bnx2x *bp)
4717 DP(NETIF_MSG_IFUP, 4761 DP(NETIF_MSG_IFUP,
4718 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n", 4762 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4719 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp)); 4763 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4720 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk, 4764 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4721 fp->status_blk_mapping); 4765 FP_SB_ID(fp));
4766 bnx2x_update_fpsb_idx(fp);
4722 } 4767 }
4723 4768
4724 bnx2x_init_def_sb(bp, bp->def_status_blk, 4769 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4725 bp->def_status_blk_mapping, DEF_SB_ID); 4770 DEF_SB_ID);
4771 bnx2x_update_dsb_idx(bp);
4726 bnx2x_update_coalesce(bp); 4772 bnx2x_update_coalesce(bp);
4727 bnx2x_init_rx_rings(bp); 4773 bnx2x_init_rx_rings(bp);
4728 bnx2x_init_tx_ring(bp); 4774 bnx2x_init_tx_ring(bp);
4729 bnx2x_init_sp_ring(bp); 4775 bnx2x_init_sp_ring(bp);
4730 bnx2x_init_context(bp); 4776 bnx2x_init_context(bp);
4731 bnx2x_init_internal(bp); 4777 bnx2x_init_internal(bp, load_code);
4732 bnx2x_storm_stats_init(bp);
4733 bnx2x_init_ind_table(bp); 4778 bnx2x_init_ind_table(bp);
4734 bnx2x_int_enable(bp); 4779 bnx2x_int_enable(bp);
4735} 4780}
@@ -4878,7 +4923,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4878 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4923 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4879 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4924 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4880 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4925 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4881 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4926 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4882 4927
4883 /* Write 0 to parser credits for CFC search request */ 4928 /* Write 0 to parser credits for CFC search request */
4884 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4929 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -4933,7 +4978,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4933 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0); 4978 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4934 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0); 4979 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4935 REG_WR(bp, CFC_REG_DEBUG0, 0x1); 4980 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4936 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0); 4981 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4937 4982
4938 /* Write 0 to parser credits for CFC search request */ 4983 /* Write 0 to parser credits for CFC search request */
4939 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); 4984 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
@@ -5000,7 +5045,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 5045 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1); 5046 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x0); 5047 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1); 5048 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5004 5049
5005 DP(NETIF_MSG_HW, "done\n"); 5050 DP(NETIF_MSG_HW, "done\n");
5006 5051
@@ -5089,11 +5134,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5089 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); 5134 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5090#endif 5135#endif
5091 5136
5092#ifndef BCM_ISCSI
5093 /* set NIC mode */
5094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5095#endif
5096
5097 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 5137 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5098#ifdef BCM_ISCSI 5138#ifdef BCM_ISCSI
5099 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 5139 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
@@ -5163,6 +5203,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
5163 } 5203 }
5164 5204
5165 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5205 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206 /* set NIC mode */
5207 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5166 if (CHIP_IS_E1H(bp)) 5208 if (CHIP_IS_E1H(bp))
5167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 5209 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5168 5210
@@ -5333,6 +5375,13 @@ static int bnx2x_init_common(struct bnx2x *bp)
5333 ((u32 *)&tmp)[1]); 5375 ((u32 *)&tmp)[1]);
5334 } 5376 }
5335 5377
5378 if (!BP_NOMCP(bp)) {
5379 bnx2x_acquire_phy_lock(bp);
5380 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381 bnx2x_release_phy_lock(bp);
5382 } else
5383 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5384
5336 return 0; 5385 return 0;
5337} 5386}
5338 5387
@@ -5638,18 +5687,23 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5638 int func = BP_FUNC(bp); 5687 int func = BP_FUNC(bp);
5639 u32 seq = ++bp->fw_seq; 5688 u32 seq = ++bp->fw_seq;
5640 u32 rc = 0; 5689 u32 rc = 0;
5690 u32 cnt = 1;
5691 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5641 5692
5642 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 5693 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5643 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 5694 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5644 5695
5645 /* let the FW do it's magic ... */ 5696 do {
5646 msleep(100); /* TBD */ 5697 /* let the FW do it's magic ... */
5698 msleep(delay);
5647 5699
5648 if (CHIP_REV_IS_SLOW(bp)) 5700 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5649 msleep(900);
5650 5701
5651 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 5702 /* Give the FW up to 2 second (200*10ms) */
5652 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); 5703 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706 cnt*delay, rc, seq);
5653 5707
5654 /* is this a reply to our command? */ 5708 /* is this a reply to our command? */
5655 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { 5709 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -5713,6 +5767,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
5713 NUM_RCQ_BD); 5767 NUM_RCQ_BD);
5714 5768
5715 /* SGE ring */ 5769 /* SGE ring */
5770 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5716 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring), 5771 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5717 bnx2x_fp(bp, i, rx_sge_mapping), 5772 bnx2x_fp(bp, i, rx_sge_mapping),
5718 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5773 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5890,7 +5945,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5890 dev_kfree_skb(skb); 5945 dev_kfree_skb(skb);
5891 } 5946 }
5892 if (!fp->disable_tpa) 5947 if (!fp->disable_tpa)
5893 bnx2x_free_tpa_pool(bp, fp, 5948 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949 ETH_MAX_AGGREGATION_QUEUES_E1 :
5894 ETH_MAX_AGGREGATION_QUEUES_E1H); 5950 ETH_MAX_AGGREGATION_QUEUES_E1H);
5895 } 5951 }
5896} 5952}
@@ -5976,8 +6032,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 bnx2x_msix_fp_int, 0, 6032 bnx2x_msix_fp_int, 0,
5977 bp->dev->name, &bp->fp[i]); 6033 bp->dev->name, &bp->fp[i]);
5978 if (rc) { 6034 if (rc) {
5979 BNX2X_ERR("request fp #%d irq failed rc %d\n", 6035 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
5980 i + offset, rc); 6036 i + offset, -rc);
5981 bnx2x_free_msix_irqs(bp); 6037 bnx2x_free_msix_irqs(bp);
5982 return -EBUSY; 6038 return -EBUSY;
5983 } 6039 }
@@ -6004,7 +6060,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
6004 * Init service functions 6060 * Init service functions
6005 */ 6061 */
6006 6062
6007static void bnx2x_set_mac_addr_e1(struct bnx2x *bp) 6063static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6008{ 6064{
6009 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 6065 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6010 int port = BP_PORT(bp); 6066 int port = BP_PORT(bp);
@@ -6026,11 +6082,15 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6026 config->config_table[0].cam_entry.lsb_mac_addr = 6082 config->config_table[0].cam_entry.lsb_mac_addr =
6027 swab16(*(u16 *)&bp->dev->dev_addr[4]); 6083 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6028 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 6084 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6029 config->config_table[0].target_table_entry.flags = 0; 6085 if (set)
6086 config->config_table[0].target_table_entry.flags = 0;
6087 else
6088 CAM_INVALIDATE(config->config_table[0]);
6030 config->config_table[0].target_table_entry.client_id = 0; 6089 config->config_table[0].target_table_entry.client_id = 0;
6031 config->config_table[0].target_table_entry.vlan_id = 0; 6090 config->config_table[0].target_table_entry.vlan_id = 0;
6032 6091
6033 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n", 6092 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6093 (set ? "setting" : "clearing"),
6034 config->config_table[0].cam_entry.msb_mac_addr, 6094 config->config_table[0].cam_entry.msb_mac_addr,
6035 config->config_table[0].cam_entry.middle_mac_addr, 6095 config->config_table[0].cam_entry.middle_mac_addr,
6036 config->config_table[0].cam_entry.lsb_mac_addr); 6096 config->config_table[0].cam_entry.lsb_mac_addr);
@@ -6040,8 +6100,11 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6040 config->config_table[1].cam_entry.middle_mac_addr = 0xffff; 6100 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6041 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff; 6101 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6042 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 6102 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6043 config->config_table[1].target_table_entry.flags = 6103 if (set)
6104 config->config_table[1].target_table_entry.flags =
6044 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 6105 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6106 else
6107 CAM_INVALIDATE(config->config_table[1]);
6045 config->config_table[1].target_table_entry.client_id = 0; 6108 config->config_table[1].target_table_entry.client_id = 0;
6046 config->config_table[1].target_table_entry.vlan_id = 0; 6109 config->config_table[1].target_table_entry.vlan_id = 0;
6047 6110
@@ -6050,12 +6113,12 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6050 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 6113 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6051} 6114}
6052 6115
6053static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp) 6116static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6054{ 6117{
6055 struct mac_configuration_cmd_e1h *config = 6118 struct mac_configuration_cmd_e1h *config =
6056 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 6119 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6057 6120
6058 if (bp->state != BNX2X_STATE_OPEN) { 6121 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6059 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); 6122 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6060 return; 6123 return;
6061 } 6124 }
@@ -6079,9 +6142,14 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6079 config->config_table[0].client_id = BP_L_ID(bp); 6142 config->config_table[0].client_id = BP_L_ID(bp);
6080 config->config_table[0].vlan_id = 0; 6143 config->config_table[0].vlan_id = 0;
6081 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 6144 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6082 config->config_table[0].flags = BP_PORT(bp); 6145 if (set)
6146 config->config_table[0].flags = BP_PORT(bp);
6147 else
6148 config->config_table[0].flags =
6149 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6083 6150
6084 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 6151 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6152 (set ? "setting" : "clearing"),
6085 config->config_table[0].msb_mac_addr, 6153 config->config_table[0].msb_mac_addr,
6086 config->config_table[0].middle_mac_addr, 6154 config->config_table[0].middle_mac_addr,
6087 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 6155 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
@@ -6106,13 +6174,13 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 bnx2x_rx_int(bp->fp, 10); 6174 bnx2x_rx_int(bp->fp, 10);
6107 /* if index is different from 0 6175 /* if index is different from 0
6108 * the reply for some commands will 6176 * the reply for some commands will
6109 * be on the none default queue 6177 * be on the non default queue
6110 */ 6178 */
6111 if (idx) 6179 if (idx)
6112 bnx2x_rx_int(&bp->fp[idx], 10); 6180 bnx2x_rx_int(&bp->fp[idx], 10);
6113 } 6181 }
6114 mb(); /* state is changed by bnx2x_sp_event() */
6115 6182
6183 mb(); /* state is changed by bnx2x_sp_event() */
6116 if (*state_p == state) 6184 if (*state_p == state)
6117 return 0; 6185 return 0;
6118 6186
@@ -6167,7 +6235,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6167{ 6235{
6168 u32 load_code; 6236 u32 load_code;
6169 int i, rc; 6237 int i, rc;
6170
6171#ifdef BNX2X_STOP_ON_ERROR 6238#ifdef BNX2X_STOP_ON_ERROR
6172 if (unlikely(bp->panic)) 6239 if (unlikely(bp->panic))
6173 return -EPERM; 6240 return -EPERM;
@@ -6183,22 +6250,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6183 if (!BP_NOMCP(bp)) { 6250 if (!BP_NOMCP(bp)) {
6184 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); 6251 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6185 if (!load_code) { 6252 if (!load_code) {
6186 BNX2X_ERR("MCP response failure, unloading\n"); 6253 BNX2X_ERR("MCP response failure, aborting\n");
6187 return -EBUSY; 6254 return -EBUSY;
6188 } 6255 }
6189 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) 6256 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6190 return -EBUSY; /* other port in diagnostic mode */ 6257 return -EBUSY; /* other port in diagnostic mode */
6191 6258
6192 } else { 6259 } else {
6260 int port = BP_PORT(bp);
6261
6193 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n", 6262 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6194 load_count[0], load_count[1], load_count[2]); 6263 load_count[0], load_count[1], load_count[2]);
6195 load_count[0]++; 6264 load_count[0]++;
6196 load_count[1 + BP_PORT(bp)]++; 6265 load_count[1 + port]++;
6197 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n", 6266 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6198 load_count[0], load_count[1], load_count[2]); 6267 load_count[0], load_count[1], load_count[2]);
6199 if (load_count[0] == 1) 6268 if (load_count[0] == 1)
6200 load_code = FW_MSG_CODE_DRV_LOAD_COMMON; 6269 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6201 else if (load_count[1 + BP_PORT(bp)] == 1) 6270 else if (load_count[1 + port] == 1)
6202 load_code = FW_MSG_CODE_DRV_LOAD_PORT; 6271 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6203 else 6272 else
6204 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION; 6273 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
@@ -6247,9 +6316,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6247 bnx2x_fp(bp, i, disable_tpa) = 6316 bnx2x_fp(bp, i, disable_tpa) =
6248 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6317 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6249 6318
6250 /* Disable interrupt handling until HW is initialized */
6251 atomic_set(&bp->intr_sem, 1);
6252
6253 if (bp->flags & USING_MSIX_FLAG) { 6319 if (bp->flags & USING_MSIX_FLAG) {
6254 rc = bnx2x_req_msix_irqs(bp); 6320 rc = bnx2x_req_msix_irqs(bp);
6255 if (rc) { 6321 if (rc) {
@@ -6276,17 +6342,14 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6276 goto load_error; 6342 goto load_error;
6277 } 6343 }
6278 6344
6279 /* Enable interrupt handling */
6280 atomic_set(&bp->intr_sem, 0);
6281
6282 /* Setup NIC internals and enable interrupts */ 6345 /* Setup NIC internals and enable interrupts */
6283 bnx2x_nic_init(bp); 6346 bnx2x_nic_init(bp, load_code);
6284 6347
6285 /* Send LOAD_DONE command to MCP */ 6348 /* Send LOAD_DONE command to MCP */
6286 if (!BP_NOMCP(bp)) { 6349 if (!BP_NOMCP(bp)) {
6287 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); 6350 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6288 if (!load_code) { 6351 if (!load_code) {
6289 BNX2X_ERR("MCP response failure, unloading\n"); 6352 BNX2X_ERR("MCP response failure, aborting\n");
6290 rc = -EBUSY; 6353 rc = -EBUSY;
6291 goto load_int_disable; 6354 goto load_int_disable;
6292 } 6355 }
@@ -6301,11 +6364,12 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6301 for_each_queue(bp, i) 6364 for_each_queue(bp, i)
6302 napi_enable(&bnx2x_fp(bp, i, napi)); 6365 napi_enable(&bnx2x_fp(bp, i, napi));
6303 6366
6367 /* Enable interrupt handling */
6368 atomic_set(&bp->intr_sem, 0);
6369
6304 rc = bnx2x_setup_leading(bp); 6370 rc = bnx2x_setup_leading(bp);
6305 if (rc) { 6371 if (rc) {
6306#ifdef BNX2X_STOP_ON_ERROR 6372 BNX2X_ERR("Setup leading failed!\n");
6307 bp->panic = 1;
6308#endif
6309 goto load_stop_netif; 6373 goto load_stop_netif;
6310 } 6374 }
6311 6375
@@ -6323,9 +6387,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6323 } 6387 }
6324 6388
6325 if (CHIP_IS_E1(bp)) 6389 if (CHIP_IS_E1(bp))
6326 bnx2x_set_mac_addr_e1(bp); 6390 bnx2x_set_mac_addr_e1(bp, 1);
6327 else 6391 else
6328 bnx2x_set_mac_addr_e1h(bp); 6392 bnx2x_set_mac_addr_e1h(bp, 1);
6329 6393
6330 if (bp->port.pmf) 6394 if (bp->port.pmf)
6331 bnx2x_initial_phy_init(bp); 6395 bnx2x_initial_phy_init(bp);
@@ -6339,7 +6403,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6339 break; 6403 break;
6340 6404
6341 case LOAD_OPEN: 6405 case LOAD_OPEN:
6342 /* IRQ is only requested from bnx2x_open */
6343 netif_start_queue(bp->dev); 6406 netif_start_queue(bp->dev);
6344 bnx2x_set_rx_mode(bp->dev); 6407 bnx2x_set_rx_mode(bp->dev);
6345 if (bp->flags & USING_MSIX_FLAG) 6408 if (bp->flags & USING_MSIX_FLAG)
@@ -6378,8 +6441,7 @@ load_int_disable:
6378 /* Free SKBs, SGEs, TPA pool and driver internals */ 6441 /* Free SKBs, SGEs, TPA pool and driver internals */
6379 bnx2x_free_skbs(bp); 6442 bnx2x_free_skbs(bp);
6380 for_each_queue(bp, i) 6443 for_each_queue(bp, i)
6381 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6444 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6382 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6383load_error: 6445load_error:
6384 bnx2x_free_mem(bp); 6446 bnx2x_free_mem(bp);
6385 6447
@@ -6411,7 +6473,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6411 return rc; 6473 return rc;
6412} 6474}
6413 6475
6414static void bnx2x_stop_leading(struct bnx2x *bp) 6476static int bnx2x_stop_leading(struct bnx2x *bp)
6415{ 6477{
6416 u16 dsb_sp_prod_idx; 6478 u16 dsb_sp_prod_idx;
6417 /* if the other port is handling traffic, 6479 /* if the other port is handling traffic,
@@ -6429,7 +6491,7 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6429 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0, 6491 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6430 &(bp->fp[0].state), 1); 6492 &(bp->fp[0].state), 1);
6431 if (rc) /* timeout */ 6493 if (rc) /* timeout */
6432 return; 6494 return rc;
6433 6495
6434 dsb_sp_prod_idx = *bp->dsb_sp_prod; 6496 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6435 6497
@@ -6441,20 +6503,24 @@ static void bnx2x_stop_leading(struct bnx2x *bp)
6441 so there is not much to do if this times out 6503 so there is not much to do if this times out
6442 */ 6504 */
6443 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) { 6505 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6444 msleep(1);
6445 if (!cnt) { 6506 if (!cnt) {
6446 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del " 6507 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6447 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", 6508 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6448 *bp->dsb_sp_prod, dsb_sp_prod_idx); 6509 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6449#ifdef BNX2X_STOP_ON_ERROR 6510#ifdef BNX2X_STOP_ON_ERROR
6450 bnx2x_panic(); 6511 bnx2x_panic();
6512#else
6513 rc = -EBUSY;
6451#endif 6514#endif
6452 break; 6515 break;
6453 } 6516 }
6454 cnt--; 6517 cnt--;
6518 msleep(1);
6455 } 6519 }
6456 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6520 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6457 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6521 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6522
6523 return rc;
6458} 6524}
6459 6525
6460static void bnx2x_reset_func(struct bnx2x *bp) 6526static void bnx2x_reset_func(struct bnx2x *bp)
@@ -6496,7 +6562,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6496 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); 6562 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6497 if (val) 6563 if (val)
6498 DP(NETIF_MSG_IFDOWN, 6564 DP(NETIF_MSG_IFDOWN,
6499 "BRB1 is not empty %d blooks are occupied\n", val); 6565 "BRB1 is not empty %d blocks are occupied\n", val);
6500 6566
6501 /* TODO: Close Doorbell port? */ 6567 /* TODO: Close Doorbell port? */
6502} 6568}
@@ -6536,11 +6602,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6536 } 6602 }
6537} 6603}
6538 6604
6539/* msut be called with rtnl_lock */ 6605/* must be called with rtnl_lock */
6540static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) 6606static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6541{ 6607{
6608 int port = BP_PORT(bp);
6542 u32 reset_code = 0; 6609 u32 reset_code = 0;
6543 int i, cnt; 6610 int i, cnt, rc;
6544 6611
6545 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 6612 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6546 6613
@@ -6557,22 +6624,17 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6557 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6624 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6558 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6625 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6559 6626
6560 /* Wait until all fast path tasks complete */ 6627 /* Wait until tx fast path tasks complete */
6561 for_each_queue(bp, i) { 6628 for_each_queue(bp, i) {
6562 struct bnx2x_fastpath *fp = &bp->fp[i]; 6629 struct bnx2x_fastpath *fp = &bp->fp[i];
6563 6630
6564#ifdef BNX2X_STOP_ON_ERROR
6565#ifdef __powerpc64__
6566 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6567#else
6568 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6569#endif
6570 fp->tpa_queue_used);
6571#endif
6572 cnt = 1000; 6631 cnt = 1000;
6573 smp_rmb(); 6632 smp_rmb();
6574 while (bnx2x_has_work(fp)) { 6633 while (BNX2X_HAS_TX_WORK(fp)) {
6575 msleep(1); 6634
6635 if (!netif_running(bp->dev))
6636 bnx2x_tx_int(fp, 1000);
6637
6576 if (!cnt) { 6638 if (!cnt) {
6577 BNX2X_ERR("timeout waiting for queue[%d]\n", 6639 BNX2X_ERR("timeout waiting for queue[%d]\n",
6578 i); 6640 i);
@@ -6584,14 +6646,13 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6584#endif 6646#endif
6585 } 6647 }
6586 cnt--; 6648 cnt--;
6649 msleep(1);
6587 smp_rmb(); 6650 smp_rmb();
6588 } 6651 }
6589 } 6652 }
6590 6653
6591 /* Wait until all slow path tasks complete */ 6654 /* Give HW time to discard old tx messages */
6592 cnt = 1000; 6655 msleep(1);
6593 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6594 msleep(1);
6595 6656
6596 for_each_queue(bp, i) 6657 for_each_queue(bp, i)
6597 napi_disable(&bnx2x_fp(bp, i, napi)); 6658 napi_disable(&bnx2x_fp(bp, i, napi));
@@ -6601,52 +6662,79 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6601 /* Release IRQs */ 6662 /* Release IRQs */
6602 bnx2x_free_irq(bp); 6663 bnx2x_free_irq(bp);
6603 6664
6604 if (bp->flags & NO_WOL_FLAG) 6665 if (unload_mode == UNLOAD_NORMAL)
6666 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668 else if (bp->flags & NO_WOL_FLAG) {
6605 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; 6669 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6670 if (CHIP_IS_E1H(bp))
6671 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6606 6672
6607 else if (bp->wol) { 6673 } else if (bp->wol) {
6608 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 6674 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6609 u8 *mac_addr = bp->dev->dev_addr; 6675 u8 *mac_addr = bp->dev->dev_addr;
6610 u32 val; 6676 u32 val;
6611
6612 /* The mac address is written to entries 1-4 to 6677 /* The mac address is written to entries 1-4 to
6613 preserve entry 0 which is used by the PMF */ 6678 preserve entry 0 which is used by the PMF */
6679 u8 entry = (BP_E1HVN(bp) + 1)*8;
6680
6614 val = (mac_addr[0] << 8) | mac_addr[1]; 6681 val = (mac_addr[0] << 8) | mac_addr[1];
6615 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val); 6682 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6616 6683
6617 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 6684 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6618 (mac_addr[4] << 8) | mac_addr[5]; 6685 (mac_addr[4] << 8) | mac_addr[5];
6619 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4, 6686 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6620 val);
6621 6687
6622 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; 6688 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6623 6689
6624 } else 6690 } else
6625 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6691 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6626 6692
6693 if (CHIP_IS_E1(bp)) {
6694 struct mac_configuration_cmd *config =
6695 bnx2x_sp(bp, mcast_config);
6696
6697 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699 for (i = 0; i < config->hdr.length_6b; i++)
6700 CAM_INVALIDATE(config->config_table[i]);
6701
6702 config->hdr.length_6b = i;
6703 if (CHIP_REV_IS_SLOW(bp))
6704 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705 else
6706 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707 config->hdr.client_id = BP_CL_ID(bp);
6708 config->hdr.reserved1 = 0;
6709
6710 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714 } else { /* E1H */
6715 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717 for (i = 0; i < MC_HASH_SIZE; i++)
6718 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719 }
6720
6721 if (CHIP_IS_E1H(bp))
6722 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6723
6627 /* Close multi and leading connections 6724 /* Close multi and leading connections
6628 Completions for ramrods are collected in a synchronous way */ 6725 Completions for ramrods are collected in a synchronous way */
6629 for_each_nondefault_queue(bp, i) 6726 for_each_nondefault_queue(bp, i)
6630 if (bnx2x_stop_multi(bp, i)) 6727 if (bnx2x_stop_multi(bp, i))
6631 goto unload_error; 6728 goto unload_error;
6632 6729
6633 if (CHIP_IS_E1H(bp)) 6730 rc = bnx2x_stop_leading(bp);
6634 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0); 6731 if (rc) {
6635
6636 bnx2x_stop_leading(bp);
6637#ifdef BNX2X_STOP_ON_ERROR
6638 /* If ramrod completion timed out - break here! */
6639 if (bp->panic) {
6640 BNX2X_ERR("Stop leading failed!\n"); 6732 BNX2X_ERR("Stop leading failed!\n");
6733#ifdef BNX2X_STOP_ON_ERROR
6641 return -EBUSY; 6734 return -EBUSY;
6642 } 6735#else
6736 goto unload_error;
6643#endif 6737#endif
6644
6645 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6646 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6647 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6648 "state 0x%x fp[0].state 0x%x\n",
6649 bp->state, bp->fp[0].state);
6650 } 6738 }
6651 6739
6652unload_error: 6740unload_error:
@@ -6656,12 +6744,12 @@ unload_error:
6656 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n", 6744 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6657 load_count[0], load_count[1], load_count[2]); 6745 load_count[0], load_count[1], load_count[2]);
6658 load_count[0]--; 6746 load_count[0]--;
6659 load_count[1 + BP_PORT(bp)]--; 6747 load_count[1 + port]--;
6660 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n", 6748 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6661 load_count[0], load_count[1], load_count[2]); 6749 load_count[0], load_count[1], load_count[2]);
6662 if (load_count[0] == 0) 6750 if (load_count[0] == 0)
6663 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; 6751 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6664 else if (load_count[1 + BP_PORT(bp)] == 0) 6752 else if (load_count[1 + port] == 0)
6665 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT; 6753 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6666 else 6754 else
6667 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION; 6755 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
@@ -6681,8 +6769,7 @@ unload_error:
6681 /* Free SKBs, SGEs, TPA pool and driver internals */ 6769 /* Free SKBs, SGEs, TPA pool and driver internals */
6682 bnx2x_free_skbs(bp); 6770 bnx2x_free_skbs(bp);
6683 for_each_queue(bp, i) 6771 for_each_queue(bp, i)
6684 bnx2x_free_rx_sge_range(bp, bp->fp + i, 6772 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6685 RX_SGE_CNT*NUM_RX_SGE_PAGES);
6686 bnx2x_free_mem(bp); 6773 bnx2x_free_mem(bp);
6687 6774
6688 bp->state = BNX2X_STATE_CLOSED; 6775 bp->state = BNX2X_STATE_CLOSED;
@@ -6733,56 +6820,93 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6733 /* Check if it is the UNDI driver 6820 /* Check if it is the UNDI driver
6734 * UNDI driver initializes CID offset for normal bell to 0x7 6821 * UNDI driver initializes CID offset for normal bell to 0x7
6735 */ 6822 */
6823 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6736 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6824 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6737 if (val == 0x7) { 6825 if (val == 0x7) {
6738 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6826 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6739 /* save our func and fw_seq */ 6827 /* save our func */
6740 int func = BP_FUNC(bp); 6828 int func = BP_FUNC(bp);
6741 u16 fw_seq = bp->fw_seq; 6829 u32 swap_en;
6830 u32 swap_val;
6742 6831
6743 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6832 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6744 6833
6745 /* try unload UNDI on port 0 */ 6834 /* try unload UNDI on port 0 */
6746 bp->func = 0; 6835 bp->func = 0;
6747 bp->fw_seq = (SHMEM_RD(bp, 6836 bp->fw_seq =
6748 func_mb[bp->func].drv_mb_header) & 6837 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6749 DRV_MSG_SEQ_NUMBER_MASK); 6838 DRV_MSG_SEQ_NUMBER_MASK);
6750
6751 reset_code = bnx2x_fw_command(bp, reset_code); 6839 reset_code = bnx2x_fw_command(bp, reset_code);
6752 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6753 6840
6754 /* if UNDI is loaded on the other port */ 6841 /* if UNDI is loaded on the other port */
6755 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) { 6842 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6756 6843
6844 /* send "DONE" for previous unload */
6845 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6846
6847 /* unload UNDI on port 1 */
6757 bp->func = 1; 6848 bp->func = 1;
6758 bp->fw_seq = (SHMEM_RD(bp, 6849 bp->fw_seq =
6759 func_mb[bp->func].drv_mb_header) & 6850 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6760 DRV_MSG_SEQ_NUMBER_MASK); 6851 DRV_MSG_SEQ_NUMBER_MASK);
6761 6852 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6762 bnx2x_fw_command(bp, 6853
6763 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS); 6854 bnx2x_fw_command(bp, reset_code);
6764 bnx2x_fw_command(bp,
6765 DRV_MSG_CODE_UNLOAD_DONE);
6766
6767 /* restore our func and fw_seq */
6768 bp->func = func;
6769 bp->fw_seq = fw_seq;
6770 } 6855 }
6771 6856
6857 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6858 HC_REG_CONFIG_0), 0x1000);
6859
6860 /* close input traffic and wait for it */
6861 /* Do not rcv packets to BRB */
6862 REG_WR(bp,
6863 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6864 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6865 /* Do not direct rcv packets that are not for MCP to
6866 * the BRB */
6867 REG_WR(bp,
6868 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6869 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6870 /* clear AEU */
6871 REG_WR(bp,
6872 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6873 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6874 msleep(10);
6875
6876 /* save NIG port swap info */
6877 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6878 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6772 /* reset device */ 6879 /* reset device */
6773 REG_WR(bp, 6880 REG_WR(bp,
6774 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 6881 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6775 0xd3ffff7f); 6882 0xd3ffffff);
6776 REG_WR(bp, 6883 REG_WR(bp,
6777 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 6884 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6778 0x1403); 6885 0x1403);
6886 /* take the NIG out of reset and restore swap values */
6887 REG_WR(bp,
6888 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6889 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6890 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6891 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6892
6893 /* send unload done to the MCP */
6894 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6895
6896 /* restore our func and fw_seq */
6897 bp->func = func;
6898 bp->fw_seq =
6899 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6900 DRV_MSG_SEQ_NUMBER_MASK);
6779 } 6901 }
6902 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6780 } 6903 }
6781} 6904}
6782 6905
6783static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 6906static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6784{ 6907{
6785 u32 val, val2, val3, val4, id; 6908 u32 val, val2, val3, val4, id;
6909 u16 pmc;
6786 6910
6787 /* Get the chip revision id and number. */ 6911 /* Get the chip revision id and number. */
6788 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6912 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
@@ -6840,8 +6964,16 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6840 BNX2X_ERR("This driver needs bc_ver %X but found %X," 6964 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6841 " please upgrade BC\n", BNX2X_BC_VER, val); 6965 " please upgrade BC\n", BNX2X_BC_VER, val);
6842 } 6966 }
6843 BNX2X_DEV_INFO("%sWoL Capable\n", 6967
6844 (bp->flags & NO_WOL_FLAG)? "Not " : ""); 6968 if (BP_E1HVN(bp) == 0) {
6969 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6970 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6971 } else {
6972 /* no WOL capability for E1HVN != 0 */
6973 bp->flags |= NO_WOL_FLAG;
6974 }
6975 BNX2X_DEV_INFO("%sWoL capable\n",
6976 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6845 6977
6846 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num); 6978 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6847 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]); 6979 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
@@ -7274,9 +7406,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7274 bp->mf_config = 7406 bp->mf_config =
7275 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 7407 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7276 7408
7277 val = 7409 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7278 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) & 7410 FUNC_MF_CFG_E1HOV_TAG_MASK);
7279 FUNC_MF_CFG_E1HOV_TAG_MASK);
7280 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 7411 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7281 7412
7282 bp->e1hov = val; 7413 bp->e1hov = val;
@@ -7324,7 +7455,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7324 7455
7325 if (BP_NOMCP(bp)) { 7456 if (BP_NOMCP(bp)) {
7326 /* only supposed to happen on emulation/FPGA */ 7457 /* only supposed to happen on emulation/FPGA */
7327 BNX2X_ERR("warning rendom MAC workaround active\n"); 7458 BNX2X_ERR("warning random MAC workaround active\n");
7328 random_ether_addr(bp->dev->dev_addr); 7459 random_ether_addr(bp->dev->dev_addr);
7329 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 7460 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7330 } 7461 }
@@ -7337,8 +7468,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7337 int func = BP_FUNC(bp); 7468 int func = BP_FUNC(bp);
7338 int rc; 7469 int rc;
7339 7470
7340 if (nomcp) 7471 /* Disable interrupt handling until HW is initialized */
7341 bp->flags |= NO_MCP_FLAG; 7472 atomic_set(&bp->intr_sem, 1);
7342 7473
7343 mutex_init(&bp->port.phy_mutex); 7474 mutex_init(&bp->port.phy_mutex);
7344 7475
@@ -7377,8 +7508,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7377 bp->tx_ticks = 50; 7508 bp->tx_ticks = 50;
7378 bp->rx_ticks = 25; 7509 bp->rx_ticks = 25;
7379 7510
7380 bp->stats_ticks = 1000000 & 0xffff00;
7381
7382 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 7511 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7383 bp->current_interval = (poll ? poll : bp->timer_interval); 7512 bp->current_interval = (poll ? poll : bp->timer_interval);
7384 7513
@@ -7628,25 +7757,25 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
7628 struct ethtool_drvinfo *info) 7757 struct ethtool_drvinfo *info)
7629{ 7758{
7630 struct bnx2x *bp = netdev_priv(dev); 7759 struct bnx2x *bp = netdev_priv(dev);
7631 char phy_fw_ver[PHY_FW_VER_LEN]; 7760 u8 phy_fw_ver[PHY_FW_VER_LEN];
7632 7761
7633 strcpy(info->driver, DRV_MODULE_NAME); 7762 strcpy(info->driver, DRV_MODULE_NAME);
7634 strcpy(info->version, DRV_MODULE_VERSION); 7763 strcpy(info->version, DRV_MODULE_VERSION);
7635 7764
7636 phy_fw_ver[0] = '\0'; 7765 phy_fw_ver[0] = '\0';
7637 if (bp->port.pmf) { 7766 if (bp->port.pmf) {
7638 bnx2x_phy_hw_lock(bp); 7767 bnx2x_acquire_phy_lock(bp);
7639 bnx2x_get_ext_phy_fw_version(&bp->link_params, 7768 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7640 (bp->state != BNX2X_STATE_CLOSED), 7769 (bp->state != BNX2X_STATE_CLOSED),
7641 phy_fw_ver, PHY_FW_VER_LEN); 7770 phy_fw_ver, PHY_FW_VER_LEN);
7642 bnx2x_phy_hw_unlock(bp); 7771 bnx2x_release_phy_lock(bp);
7643 } 7772 }
7644 7773
7645 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s", 7774 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7646 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, 7775 (bp->common.bc_ver & 0xff0000) >> 16,
7647 BCM_5710_FW_REVISION_VERSION, 7776 (bp->common.bc_ver & 0xff00) >> 8,
7648 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver, 7777 (bp->common.bc_ver & 0xff),
7649 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver); 7778 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7650 strcpy(info->bus_info, pci_name(bp->pdev)); 7779 strcpy(info->bus_info, pci_name(bp->pdev));
7651 info->n_stats = BNX2X_NUM_STATS; 7780 info->n_stats = BNX2X_NUM_STATS;
7652 info->testinfo_len = BNX2X_NUM_TESTS; 7781 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -8097,7 +8226,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8097 if (eeprom->magic == 0x00504859) 8226 if (eeprom->magic == 0x00504859)
8098 if (bp->port.pmf) { 8227 if (bp->port.pmf) {
8099 8228
8100 bnx2x_phy_hw_lock(bp); 8229 bnx2x_acquire_phy_lock(bp);
8101 rc = bnx2x_flash_download(bp, BP_PORT(bp), 8230 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8102 bp->link_params.ext_phy_config, 8231 bp->link_params.ext_phy_config,
8103 (bp->state != BNX2X_STATE_CLOSED), 8232 (bp->state != BNX2X_STATE_CLOSED),
@@ -8109,7 +8238,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
8109 rc |= bnx2x_phy_init(&bp->link_params, 8238 rc |= bnx2x_phy_init(&bp->link_params,
8110 &bp->link_vars); 8239 &bp->link_vars);
8111 } 8240 }
8112 bnx2x_phy_hw_unlock(bp); 8241 bnx2x_release_phy_lock(bp);
8113 8242
8114 } else /* Only the PMF can access the PHY */ 8243 } else /* Only the PMF can access the PHY */
8115 return -EINVAL; 8244 return -EINVAL;
@@ -8128,7 +8257,6 @@ static int bnx2x_get_coalesce(struct net_device *dev,
8128 8257
8129 coal->rx_coalesce_usecs = bp->rx_ticks; 8258 coal->rx_coalesce_usecs = bp->rx_ticks;
8130 coal->tx_coalesce_usecs = bp->tx_ticks; 8259 coal->tx_coalesce_usecs = bp->tx_ticks;
8131 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8132 8260
8133 return 0; 8261 return 0;
8134} 8262}
@@ -8146,44 +8274,12 @@ static int bnx2x_set_coalesce(struct net_device *dev,
8146 if (bp->tx_ticks > 0x3000) 8274 if (bp->tx_ticks > 0x3000)
8147 bp->tx_ticks = 0x3000; 8275 bp->tx_ticks = 0x3000;
8148 8276
8149 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8150 if (bp->stats_ticks > 0xffff00)
8151 bp->stats_ticks = 0xffff00;
8152 bp->stats_ticks &= 0xffff00;
8153
8154 if (netif_running(dev)) 8277 if (netif_running(dev))
8155 bnx2x_update_coalesce(bp); 8278 bnx2x_update_coalesce(bp);
8156 8279
8157 return 0; 8280 return 0;
8158} 8281}
8159 8282
8160static int bnx2x_set_flags(struct net_device *dev, u32 data)
8161{
8162 struct bnx2x *bp = netdev_priv(dev);
8163 int changed = 0;
8164 int rc = 0;
8165
8166 if (data & ETH_FLAG_LRO) {
8167 if (!(dev->features & NETIF_F_LRO)) {
8168 dev->features |= NETIF_F_LRO;
8169 bp->flags |= TPA_ENABLE_FLAG;
8170 changed = 1;
8171 }
8172
8173 } else if (dev->features & NETIF_F_LRO) {
8174 dev->features &= ~NETIF_F_LRO;
8175 bp->flags &= ~TPA_ENABLE_FLAG;
8176 changed = 1;
8177 }
8178
8179 if (changed && netif_running(dev)) {
8180 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8181 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8182 }
8183
8184 return rc;
8185}
8186
8187static void bnx2x_get_ringparam(struct net_device *dev, 8283static void bnx2x_get_ringparam(struct net_device *dev,
8188 struct ethtool_ringparam *ering) 8284 struct ethtool_ringparam *ering)
8189{ 8285{
@@ -8266,7 +8362,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8266 8362
8267 if (epause->autoneg) { 8363 if (epause->autoneg) {
8268 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8364 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8269 DP(NETIF_MSG_LINK, "Autoneg not supported\n"); 8365 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8270 return -EINVAL; 8366 return -EINVAL;
8271 } 8367 }
8272 8368
@@ -8285,6 +8381,34 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8285 return 0; 8381 return 0;
8286} 8382}
8287 8383
8384static int bnx2x_set_flags(struct net_device *dev, u32 data)
8385{
8386 struct bnx2x *bp = netdev_priv(dev);
8387 int changed = 0;
8388 int rc = 0;
8389
8390 /* TPA requires Rx CSUM offloading */
8391 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8392 if (!(dev->features & NETIF_F_LRO)) {
8393 dev->features |= NETIF_F_LRO;
8394 bp->flags |= TPA_ENABLE_FLAG;
8395 changed = 1;
8396 }
8397
8398 } else if (dev->features & NETIF_F_LRO) {
8399 dev->features &= ~NETIF_F_LRO;
8400 bp->flags &= ~TPA_ENABLE_FLAG;
8401 changed = 1;
8402 }
8403
8404 if (changed && netif_running(dev)) {
8405 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8406 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8407 }
8408
8409 return rc;
8410}
8411
8288static u32 bnx2x_get_rx_csum(struct net_device *dev) 8412static u32 bnx2x_get_rx_csum(struct net_device *dev)
8289{ 8413{
8290 struct bnx2x *bp = netdev_priv(dev); 8414 struct bnx2x *bp = netdev_priv(dev);
@@ -8295,9 +8419,19 @@ static u32 bnx2x_get_rx_csum(struct net_device *dev)
8295static int bnx2x_set_rx_csum(struct net_device *dev, u32 data) 8419static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8296{ 8420{
8297 struct bnx2x *bp = netdev_priv(dev); 8421 struct bnx2x *bp = netdev_priv(dev);
8422 int rc = 0;
8298 8423
8299 bp->rx_csum = data; 8424 bp->rx_csum = data;
8300 return 0; 8425
8426 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8427 TPA'ed packets will be discarded due to wrong TCP CSUM */
8428 if (!data) {
8429 u32 flags = ethtool_op_get_flags(dev);
8430
8431 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8432 }
8433
8434 return rc;
8301} 8435}
8302 8436
8303static int bnx2x_set_tso(struct net_device *dev, u32 data) 8437static int bnx2x_set_tso(struct net_device *dev, u32 data)
@@ -8335,6 +8469,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8335{ 8469{
8336 int idx, i, rc = -ENODEV; 8470 int idx, i, rc = -ENODEV;
8337 u32 wr_val = 0; 8471 u32 wr_val = 0;
8472 int port = BP_PORT(bp);
8338 static const struct { 8473 static const struct {
8339 u32 offset0; 8474 u32 offset0;
8340 u32 offset1; 8475 u32 offset1;
@@ -8400,7 +8535,6 @@ static int bnx2x_test_registers(struct bnx2x *bp)
8400 8535
8401 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) { 8536 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8402 u32 offset, mask, save_val, val; 8537 u32 offset, mask, save_val, val;
8403 int port = BP_PORT(bp);
8404 8538
8405 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1; 8539 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8406 mask = reg_tbl[i].mask; 8540 mask = reg_tbl[i].mask;
@@ -8446,16 +8580,17 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8446 static const struct { 8580 static const struct {
8447 char *name; 8581 char *name;
8448 u32 offset; 8582 u32 offset;
8449 u32 mask; 8583 u32 e1_mask;
8584 u32 e1h_mask;
8450 } prty_tbl[] = { 8585 } prty_tbl[] = {
8451 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0 }, 8586 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8452 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0 }, 8587 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8453 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0 }, 8588 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8454 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0 }, 8589 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8455 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0 }, 8590 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8456 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x1 }, 8591 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8457 8592
8458 { NULL, 0xffffffff, 0 } 8593 { NULL, 0xffffffff, 0, 0 }
8459 }; 8594 };
8460 8595
8461 if (!netif_running(bp->dev)) 8596 if (!netif_running(bp->dev))
@@ -8469,7 +8604,8 @@ static int bnx2x_test_memory(struct bnx2x *bp)
8469 /* Check the parity status */ 8604 /* Check the parity status */
8470 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) { 8605 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8471 val = REG_RD(bp, prty_tbl[i].offset); 8606 val = REG_RD(bp, prty_tbl[i].offset);
8472 if (val & ~(prty_tbl[i].mask)) { 8607 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8608 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8473 DP(NETIF_MSG_HW, 8609 DP(NETIF_MSG_HW,
8474 "%s is 0x%x\n", prty_tbl[i].name, val); 8610 "%s is 0x%x\n", prty_tbl[i].name, val);
8475 goto test_mem_exit; 8611 goto test_mem_exit;
@@ -8539,15 +8675,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8539 8675
8540 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8676 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8541 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8677 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8542 bnx2x_phy_hw_lock(bp); 8678 bnx2x_acquire_phy_lock(bp);
8543 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8679 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8544 bnx2x_phy_hw_unlock(bp); 8680 bnx2x_release_phy_lock(bp);
8545 8681
8546 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8682 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8547 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8683 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8548 bnx2x_phy_hw_lock(bp); 8684 bnx2x_acquire_phy_lock(bp);
8549 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8685 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8550 bnx2x_phy_hw_unlock(bp); 8686 bnx2x_release_phy_lock(bp);
8551 /* wait until link state is restored */ 8687 /* wait until link state is restored */
8552 bnx2x_wait_for_link(bp, link_up); 8688 bnx2x_wait_for_link(bp, link_up);
8553 8689
@@ -8771,7 +8907,7 @@ static void bnx2x_self_test(struct net_device *dev,
8771 if (!netif_running(dev)) 8907 if (!netif_running(dev))
8772 return; 8908 return;
8773 8909
8774 /* offline tests are not suppoerted in MF mode */ 8910 /* offline tests are not supported in MF mode */
8775 if (IS_E1HMF(bp)) 8911 if (IS_E1HMF(bp))
8776 etest->flags &= ~ETH_TEST_FL_OFFLINE; 8912 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8777 8913
@@ -8827,76 +8963,99 @@ static const struct {
8827 long offset; 8963 long offset;
8828 int size; 8964 int size;
8829 u32 flags; 8965 u32 flags;
8830 char string[ETH_GSTRING_LEN]; 8966#define STATS_FLAGS_PORT 1
8967#define STATS_FLAGS_FUNC 2
8968 u8 string[ETH_GSTRING_LEN];
8831} bnx2x_stats_arr[BNX2X_NUM_STATS] = { 8969} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8832/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" }, 8970/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8833 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" }, 8971 8, STATS_FLAGS_FUNC, "rx_bytes" },
8834 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" }, 8972 { STATS_OFFSET32(error_bytes_received_hi),
8835 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" }, 8973 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8974 { STATS_OFFSET32(total_bytes_transmitted_hi),
8975 8, STATS_FLAGS_FUNC, "tx_bytes" },
8976 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8977 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8836 { STATS_OFFSET32(total_unicast_packets_received_hi), 8978 { STATS_OFFSET32(total_unicast_packets_received_hi),
8837 8, 1, "rx_ucast_packets" }, 8979 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8838 { STATS_OFFSET32(total_multicast_packets_received_hi), 8980 { STATS_OFFSET32(total_multicast_packets_received_hi),
8839 8, 1, "rx_mcast_packets" }, 8981 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8840 { STATS_OFFSET32(total_broadcast_packets_received_hi), 8982 { STATS_OFFSET32(total_broadcast_packets_received_hi),
8841 8, 1, "rx_bcast_packets" }, 8983 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8842 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8984 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8843 8, 1, "tx_packets" }, 8985 8, STATS_FLAGS_FUNC, "tx_packets" },
8844 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8986 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8845 8, 0, "tx_mac_errors" }, 8987 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8846/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8988/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8847 8, 0, "tx_carrier_errors" }, 8989 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8848 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8990 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8849 8, 0, "rx_crc_errors" }, 8991 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8850 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8992 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8851 8, 0, "rx_align_errors" }, 8993 8, STATS_FLAGS_PORT, "rx_align_errors" },
8852 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8994 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8853 8, 0, "tx_single_collisions" }, 8995 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8854 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8996 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8855 8, 0, "tx_multi_collisions" }, 8997 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8856 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8998 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8857 8, 0, "tx_deferred" }, 8999 8, STATS_FLAGS_PORT, "tx_deferred" },
8858 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 9000 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8859 8, 0, "tx_excess_collisions" }, 9001 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
8860 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 9002 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8861 8, 0, "tx_late_collisions" }, 9003 8, STATS_FLAGS_PORT, "tx_late_collisions" },
8862 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 9004 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8863 8, 0, "tx_total_collisions" }, 9005 8, STATS_FLAGS_PORT, "tx_total_collisions" },
8864 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 9006 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8865 8, 0, "rx_fragments" }, 9007 8, STATS_FLAGS_PORT, "rx_fragments" },
8866/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" }, 9008/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9009 8, STATS_FLAGS_PORT, "rx_jabbers" },
8867 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 9010 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8868 8, 0, "rx_undersize_packets" }, 9011 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
8869 { STATS_OFFSET32(jabber_packets_received), 9012 { STATS_OFFSET32(jabber_packets_received),
8870 4, 1, "rx_oversize_packets" }, 9013 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
8871 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 9014 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8872 8, 0, "tx_64_byte_packets" }, 9015 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
8873 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 9016 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8874 8, 0, "tx_65_to_127_byte_packets" }, 9017 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
8875 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 9018 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8876 8, 0, "tx_128_to_255_byte_packets" }, 9019 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
8877 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 9020 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8878 8, 0, "tx_256_to_511_byte_packets" }, 9021 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
8879 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 9022 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8880 8, 0, "tx_512_to_1023_byte_packets" }, 9023 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
8881 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 9024 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8882 8, 0, "tx_1024_to_1522_byte_packets" }, 9025 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
8883 { STATS_OFFSET32(etherstatspktsover1522octets_hi), 9026 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8884 8, 0, "tx_1523_to_9022_byte_packets" }, 9027 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
8885/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi), 9028/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8886 8, 0, "rx_xon_frames" }, 9029 8, STATS_FLAGS_PORT, "rx_xon_frames" },
8887 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi), 9030 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8888 8, 0, "rx_xoff_frames" }, 9031 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
8889 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" }, 9032 { STATS_OFFSET32(tx_stat_outxonsent_hi),
8890 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" }, 9033 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9034 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9035 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
8891 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 9036 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8892 8, 0, "rx_mac_ctrl_frames" }, 9037 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
8893 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" }, 9038 { STATS_OFFSET32(mac_filter_discard),
8894 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" }, 9039 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
8895 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" }, 9040 { STATS_OFFSET32(no_buff_discard),
8896 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" }, 9041 4, STATS_FLAGS_FUNC, "rx_discards" },
8897/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" } 9042 { STATS_OFFSET32(xxoverflow_discard),
9043 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9044 { STATS_OFFSET32(brb_drop_hi),
9045 8, STATS_FLAGS_PORT, "brb_discard" },
9046 { STATS_OFFSET32(brb_truncate_hi),
9047 8, STATS_FLAGS_PORT, "brb_truncate" },
9048/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9049 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9050 { STATS_OFFSET32(rx_skb_alloc_failed),
9051 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9052/* 42 */{ STATS_OFFSET32(hw_csum_err),
9053 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
8898}; 9054};
8899 9055
9056#define IS_NOT_E1HMF_STAT(bp, i) \
9057 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9058
8900static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 9059static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8901{ 9060{
8902 struct bnx2x *bp = netdev_priv(dev); 9061 struct bnx2x *bp = netdev_priv(dev);
@@ -8905,7 +9064,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8905 switch (stringset) { 9064 switch (stringset) {
8906 case ETH_SS_STATS: 9065 case ETH_SS_STATS:
8907 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9066 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8908 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9067 if (IS_NOT_E1HMF_STAT(bp, i))
8909 continue; 9068 continue;
8910 strcpy(buf + j*ETH_GSTRING_LEN, 9069 strcpy(buf + j*ETH_GSTRING_LEN,
8911 bnx2x_stats_arr[i].string); 9070 bnx2x_stats_arr[i].string);
@@ -8925,7 +9084,7 @@ static int bnx2x_get_stats_count(struct net_device *dev)
8925 int i, num_stats = 0; 9084 int i, num_stats = 0;
8926 9085
8927 for (i = 0; i < BNX2X_NUM_STATS; i++) { 9086 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8928 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9087 if (IS_NOT_E1HMF_STAT(bp, i))
8929 continue; 9088 continue;
8930 num_stats++; 9089 num_stats++;
8931 } 9090 }
@@ -8940,7 +9099,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
8940 int i, j; 9099 int i, j;
8941 9100
8942 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) { 9101 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8943 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags)) 9102 if (IS_NOT_E1HMF_STAT(bp, i))
8944 continue; 9103 continue;
8945 9104
8946 if (bnx2x_stats_arr[i].size == 0) { 9105 if (bnx2x_stats_arr[i].size == 0) {
@@ -9057,7 +9216,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9057 PCI_PM_CTRL_PME_STATUS)); 9216 PCI_PM_CTRL_PME_STATUS));
9058 9217
9059 if (pmcsr & PCI_PM_CTRL_STATE_MASK) 9218 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9060 /* delay required during transition out of D3hot */ 9219 /* delay required during transition out of D3hot */
9061 msleep(20); 9220 msleep(20);
9062 break; 9221 break;
9063 9222
@@ -9104,17 +9263,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9104 9263
9105 bnx2x_update_fpsb_idx(fp); 9264 bnx2x_update_fpsb_idx(fp);
9106 9265
9107 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || 9266 if (BNX2X_HAS_TX_WORK(fp))
9108 (fp->tx_pkt_prod != fp->tx_pkt_cons))
9109 bnx2x_tx_int(fp, budget); 9267 bnx2x_tx_int(fp, budget);
9110 9268
9111 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons) 9269 if (BNX2X_HAS_RX_WORK(fp))
9112 work_done = bnx2x_rx_int(fp, budget); 9270 work_done = bnx2x_rx_int(fp, budget);
9113 9271
9114 rmb(); /* bnx2x_has_work() reads the status block */ 9272 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9115 9273
9116 /* must not complete if we consumed full budget */ 9274 /* must not complete if we consumed full budget */
9117 if ((work_done < budget) && !bnx2x_has_work(fp)) { 9275 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9118 9276
9119#ifdef BNX2X_STOP_ON_ERROR 9277#ifdef BNX2X_STOP_ON_ERROR
9120poll_panic: 9278poll_panic:
@@ -9131,7 +9289,7 @@ poll_panic:
9131 9289
9132 9290
9133/* we split the first BD into headers and data BDs 9291/* we split the first BD into headers and data BDs
9134 * to ease the pain of our fellow micocode engineers 9292 * to ease the pain of our fellow microcode engineers
9135 * we use one mapping for both BDs 9293 * we use one mapping for both BDs
9136 * So far this has only been observed to happen 9294 * So far this has only been observed to happen
9137 * in Other Operating Systems(TM) 9295 * in Other Operating Systems(TM)
@@ -9238,7 +9396,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9238 /* Check if LSO packet needs to be copied: 9396 /* Check if LSO packet needs to be copied:
9239 3 = 1 (for headers BD) + 2 (for PBD and last BD) */ 9397 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9240 int wnd_size = MAX_FETCH_BD - 3; 9398 int wnd_size = MAX_FETCH_BD - 3;
9241 /* Number of widnows to check */ 9399 /* Number of windows to check */
9242 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size; 9400 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9243 int wnd_idx = 0; 9401 int wnd_idx = 0;
9244 int frag_idx = 0; 9402 int frag_idx = 0;
@@ -9340,7 +9498,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9340 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9498 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9341 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9499 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9342 9500
9343 /* First, check if we need to linearaize the skb 9501 /* First, check if we need to linearize the skb
9344 (due to FW restrictions) */ 9502 (due to FW restrictions) */
9345 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9503 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9346 /* Statistics of linearization */ 9504 /* Statistics of linearization */
@@ -9349,7 +9507,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9349 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - " 9507 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9350 "silently dropping this SKB\n"); 9508 "silently dropping this SKB\n");
9351 dev_kfree_skb_any(skb); 9509 dev_kfree_skb_any(skb);
9352 return 0; 9510 return NETDEV_TX_OK;
9353 } 9511 }
9354 } 9512 }
9355 9513
@@ -9372,7 +9530,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9372 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 9530 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9373 tx_bd->general_data = (UNICAST_ADDRESS << 9531 tx_bd->general_data = (UNICAST_ADDRESS <<
9374 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); 9532 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9375 tx_bd->general_data |= 1; /* header nbd */ 9533 /* header nbd */
9534 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9376 9535
9377 /* remember the first BD of the packet */ 9536 /* remember the first BD of the packet */
9378 tx_buf->first_bd = fp->tx_bd_prod; 9537 tx_buf->first_bd = fp->tx_bd_prod;
@@ -9451,7 +9610,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9451 9610
9452 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 9611 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9453 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 9612 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9454 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2); 9613 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9455 tx_bd->nbd = cpu_to_le16(nbd); 9614 tx_bd->nbd = cpu_to_le16(nbd);
9456 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb)); 9615 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9457 9616
@@ -9721,9 +9880,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9721 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 9880 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9722 if (netif_running(dev)) { 9881 if (netif_running(dev)) {
9723 if (CHIP_IS_E1(bp)) 9882 if (CHIP_IS_E1(bp))
9724 bnx2x_set_mac_addr_e1(bp); 9883 bnx2x_set_mac_addr_e1(bp, 1);
9725 else 9884 else
9726 bnx2x_set_mac_addr_e1h(bp); 9885 bnx2x_set_mac_addr_e1h(bp, 1);
9727 } 9886 }
9728 9887
9729 return 0; 9888 return 0;
@@ -9734,6 +9893,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9734{ 9893{
9735 struct mii_ioctl_data *data = if_mii(ifr); 9894 struct mii_ioctl_data *data = if_mii(ifr);
9736 struct bnx2x *bp = netdev_priv(dev); 9895 struct bnx2x *bp = netdev_priv(dev);
9896 int port = BP_PORT(bp);
9737 int err; 9897 int err;
9738 9898
9739 switch (cmd) { 9899 switch (cmd) {
@@ -9749,7 +9909,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9749 return -EAGAIN; 9909 return -EAGAIN;
9750 9910
9751 mutex_lock(&bp->port.phy_mutex); 9911 mutex_lock(&bp->port.phy_mutex);
9752 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9912 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9753 DEFAULT_PHY_DEV_ADDR, 9913 DEFAULT_PHY_DEV_ADDR,
9754 (data->reg_num & 0x1f), &mii_regval); 9914 (data->reg_num & 0x1f), &mii_regval);
9755 data->val_out = mii_regval; 9915 data->val_out = mii_regval;
@@ -9765,7 +9925,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9765 return -EAGAIN; 9925 return -EAGAIN;
9766 9926
9767 mutex_lock(&bp->port.phy_mutex); 9927 mutex_lock(&bp->port.phy_mutex);
9768 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr, 9928 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9769 DEFAULT_PHY_DEV_ADDR, 9929 DEFAULT_PHY_DEV_ADDR,
9770 (data->reg_num & 0x1f), data->val_in); 9930 (data->reg_num & 0x1f), data->val_in);
9771 mutex_unlock(&bp->port.phy_mutex); 9931 mutex_unlock(&bp->port.phy_mutex);
@@ -10141,7 +10301,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10141 10301
10142 netif_device_detach(dev); 10302 netif_device_detach(dev);
10143 10303
10144 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10304 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10145 10305
10146 bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); 10306 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10147 10307
@@ -10174,7 +10334,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
10174 bnx2x_set_power_state(bp, PCI_D0); 10334 bnx2x_set_power_state(bp, PCI_D0);
10175 netif_device_attach(dev); 10335 netif_device_attach(dev);
10176 10336
10177 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 10337 rc = bnx2x_nic_load(bp, LOAD_OPEN);
10178 10338
10179 rtnl_unlock(); 10339 rtnl_unlock();
10180 10340
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 15c9a9946724..a67b0c358ae4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -6,7 +6,7 @@
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 * 8 *
9 * The registers description starts with the regsister Access type followed 9 * The registers description starts with the register Access type followed
10 * by size in bits. For example [RW 32]. The access types are: 10 * by size in bits. For example [RW 32]. The access types are:
11 * R - Read only 11 * R - Read only
12 * RC - Clear on read 12 * RC - Clear on read
@@ -49,7 +49,7 @@
49/* [RW 10] Write client 0: Assert pause threshold. */ 49/* [RW 10] Write client 0: Assert pause threshold. */
50#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068 50#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
51#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c 51#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
52/* [R 24] The number of full blocks occpied by port. */ 52/* [R 24] The number of full blocks occupied by port. */
53#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094 53#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
54/* [RW 1] Reset the design by software. */ 54/* [RW 1] Reset the design by software. */
55#define BRB1_REG_SOFT_RESET 0x600dc 55#define BRB1_REG_SOFT_RESET 0x600dc
@@ -740,6 +740,7 @@
740#define HC_REG_ATTN_MSG1_ADDR_L 0x108020 740#define HC_REG_ATTN_MSG1_ADDR_L 0x108020
741#define HC_REG_ATTN_NUM_P0 0x108038 741#define HC_REG_ATTN_NUM_P0 0x108038
742#define HC_REG_ATTN_NUM_P1 0x10803c 742#define HC_REG_ATTN_NUM_P1 0x10803c
743#define HC_REG_COMMAND_REG 0x108180
743#define HC_REG_CONFIG_0 0x108000 744#define HC_REG_CONFIG_0 0x108000
744#define HC_REG_CONFIG_1 0x108004 745#define HC_REG_CONFIG_1 0x108004
745#define HC_REG_FUNC_NUM_P0 0x1080ac 746#define HC_REG_FUNC_NUM_P0 0x1080ac
@@ -1372,6 +1373,23 @@
1372 be asserted). */ 1373 be asserted). */
1373#define MISC_REG_DRIVER_CONTROL_16 0xa5f0 1374#define MISC_REG_DRIVER_CONTROL_16 0xa5f0
1374#define MISC_REG_DRIVER_CONTROL_16_SIZE 2 1375#define MISC_REG_DRIVER_CONTROL_16_SIZE 2
1376/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1377 32 clients. Each client can be controlled by one driver only. One in each
1378 bit represent that this driver control the appropriate client (Ex: bit 5
1379 is set means this driver control client number 5). addr1 = set; addr0 =
1380 clear; read from both addresses will give the same result = status. write
1381 to address 1 will set a request to control all the clients that their
1382 appropriate bit (in the write command) is set. if the client is free (the
1383 appropriate bit in all the other drivers is clear) one will be written to
1384 that driver register; if the client isn't free the bit will remain zero.
1385 if the appropriate bit is set (the driver request to gain control on a
1386 client it already controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW
1387 interrupt will be asserted). write to address 0 will set a request to
1388 free all the clients that their appropriate bit (in the write command) is
1389 set. if the appropriate bit is clear (the driver request to free a client
1390 it doesn't controls the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will
1391 be asserted). */
1392#define MISC_REG_DRIVER_CONTROL_7 0xa3c8
1375/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0 1393/* [RW 1] e1hmf for WOL. If clr WOL signal o the PXP will be send on bit 0
1376 only. */ 1394 only. */
1377#define MISC_REG_E1HMF_MODE 0xa5f8 1395#define MISC_REG_E1HMF_MODE 0xa5f8
@@ -1394,13 +1412,13 @@
1394#define MISC_REG_GPIO 0xa490 1412#define MISC_REG_GPIO 0xa490
1395/* [R 28] this field hold the last information that caused reserved 1413/* [R 28] this field hold the last information that caused reserved
1396 attention. bits [19:0] - address; [22:20] function; [23] reserved; 1414 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1397 [27:24] the master thatcaused the attention - according to the following 1415 [27:24] the master that caused the attention - according to the following
1398 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = 1416 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1399 dbu; 8 = dmae */ 1417 dbu; 8 = dmae */
1400#define MISC_REG_GRC_RSV_ATTN 0xa3c0 1418#define MISC_REG_GRC_RSV_ATTN 0xa3c0
1401/* [R 28] this field hold the last information that caused timeout 1419/* [R 28] this field hold the last information that caused timeout
1402 attention. bits [19:0] - address; [22:20] function; [23] reserved; 1420 attention. bits [19:0] - address; [22:20] function; [23] reserved;
1403 [27:24] the master thatcaused the attention - according to the following 1421 [27:24] the master that caused the attention - according to the following
1404 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 = 1422 encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
1405 dbu; 8 = dmae */ 1423 dbu; 8 = dmae */
1406#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4 1424#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
@@ -1677,6 +1695,7 @@
1677/* [RW 8] init credit counter for port0 in LLH */ 1695/* [RW 8] init credit counter for port0 in LLH */
1678#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554 1696#define NIG_REG_LLH0_XCM_INIT_CREDIT 0x10554
1679#define NIG_REG_LLH0_XCM_MASK 0x10130 1697#define NIG_REG_LLH0_XCM_MASK 0x10130
1698#define NIG_REG_LLH1_BRB1_DRV_MASK 0x10248
1680/* [RW 1] send to BRB1 if no match on any of RMP rules. */ 1699/* [RW 1] send to BRB1 if no match on any of RMP rules. */
1681#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc 1700#define NIG_REG_LLH1_BRB1_NOT_MCP 0x102dc
1682/* [RW 2] Determine the classification participants. 0: no classification.1: 1701/* [RW 2] Determine the classification participants. 0: no classification.1:
@@ -1727,6 +1746,9 @@
1727/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure 1746/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
1728 for port0 */ 1747 for port0 */
1729#define NIG_REG_STAT0_BRB_DISCARD 0x105f0 1748#define NIG_REG_STAT0_BRB_DISCARD 0x105f0
1749/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
1750 for port0 */
1751#define NIG_REG_STAT0_BRB_TRUNCATE 0x105f8
1730/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that 1752/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that
1731 between 1024 and 1522 bytes for port0 */ 1753 between 1024 and 1522 bytes for port0 */
1732#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750 1754#define NIG_REG_STAT0_EGRESS_MAC_PKT0 0x10750
@@ -2298,7 +2320,7 @@
2298/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; 2320/* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k;
2299 -128k */ 2321 -128k */
2300#define PXP2_REG_RQ_QM_P_SIZE 0x120050 2322#define PXP2_REG_RQ_QM_P_SIZE 0x120050
2301/* [RW 1] 1' indicates that the RBC has finished configurating the PSWRQ */ 2323/* [RW 1] 1' indicates that the RBC has finished configuring the PSWRQ */
2302#define PXP2_REG_RQ_RBC_DONE 0x1201b0 2324#define PXP2_REG_RQ_RBC_DONE 0x1201b0
2303/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B; 2325/* [RW 3] Max burst size filed for read requests port 0; 000 - 128B;
2304 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */ 2326 001:256B; 010: 512B; 11:1K:100:2K; 01:4K */
@@ -2406,7 +2428,7 @@
2406/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the 2428/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
2407 buffer reaches this number has_payload will be asserted */ 2429 buffer reaches this number has_payload will be asserted */
2408#define PXP2_REG_WR_DMAE_MPS 0x1205ec 2430#define PXP2_REG_WR_DMAE_MPS 0x1205ec
2409/* [RW 10] if Number of entries in dmae fifo will be higer than this 2431/* [RW 10] if Number of entries in dmae fifo will be higher than this
2410 threshold then has_payload indication will be asserted; the default value 2432 threshold then has_payload indication will be asserted; the default value
2411 should be equal to &gt; write MBS size! */ 2433 should be equal to &gt; write MBS size! */
2412#define PXP2_REG_WR_DMAE_TH 0x120368 2434#define PXP2_REG_WR_DMAE_TH 0x120368
@@ -2427,7 +2449,7 @@
2427/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the 2449/* [RW 2] 0 - 128B; - 256B; - 512B; - 1024B; when the payload in the
2428 buffer reaches this number has_payload will be asserted */ 2450 buffer reaches this number has_payload will be asserted */
2429#define PXP2_REG_WR_TSDM_MPS 0x1205d4 2451#define PXP2_REG_WR_TSDM_MPS 0x1205d4
2430/* [RW 10] if Number of entries in usdmdp fifo will be higer than this 2452/* [RW 10] if Number of entries in usdmdp fifo will be higher than this
2431 threshold then has_payload indication will be asserted; the default value 2453 threshold then has_payload indication will be asserted; the default value
2432 should be equal to &gt; write MBS size! */ 2454 should be equal to &gt; write MBS size! */
2433#define PXP2_REG_WR_USDMDP_TH 0x120348 2455#define PXP2_REG_WR_USDMDP_TH 0x120348
@@ -3294,12 +3316,12 @@
3294#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0 3316#define XSEM_XSEM_INT_MASK_0_REG_ADDRESS_ERROR_SIZE 0
3295#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4) 3317#define CFC_DEBUG1_REG_WRITE_AC (0x1<<4)
3296#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4 3318#define CFC_DEBUG1_REG_WRITE_AC_SIZE 4
3297/* [R 1] debug only: This bit indicates wheter indicates that external 3319/* [R 1] debug only: This bit indicates whether indicates that external
3298 buffer was wrapped (oldest data was thrown); Relevant only when 3320 buffer was wrapped (oldest data was thrown); Relevant only when
3299 ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */ 3321 ~dbg_registers_debug_target=2 (PCI) & ~dbg_registers_full_mode=1 (wrap); */
3300#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124 3322#define DBG_REG_WRAP_ON_EXT_BUFFER 0xc124
3301#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1 3323#define DBG_REG_WRAP_ON_EXT_BUFFER_SIZE 1
3302/* [R 1] debug only: This bit indicates wheter the internal buffer was 3324/* [R 1] debug only: This bit indicates whether the internal buffer was
3303 wrapped (oldest data was thrown) Relevant only when 3325 wrapped (oldest data was thrown) Relevant only when
3304 ~dbg_registers_debug_target=0 (internal buffer) */ 3326 ~dbg_registers_debug_target=0 (internal buffer) */
3305#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128 3327#define DBG_REG_WRAP_ON_INT_BUFFER 0xc128
@@ -4944,6 +4966,7 @@
4944#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) 4966#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4945#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) 4967#define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31)
4946#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) 4968#define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3)
4969#define EMAC_TX_MODE_FLOW_EN (1L<<4)
4947#define MISC_REGISTERS_GPIO_0 0 4970#define MISC_REGISTERS_GPIO_0 0
4948#define MISC_REGISTERS_GPIO_1 1 4971#define MISC_REGISTERS_GPIO_1 1
4949#define MISC_REGISTERS_GPIO_2 2 4972#define MISC_REGISTERS_GPIO_2 2
@@ -4959,6 +4982,7 @@
4959#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 4982#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4960#define MISC_REGISTERS_GPIO_SET_POS 8 4983#define MISC_REGISTERS_GPIO_SET_POS 8
4961#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 4984#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4985#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4962#define MISC_REGISTERS_RESET_REG_1_SET 0x584 4986#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4963#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 4987#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4964#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) 4988#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
@@ -4993,7 +5017,9 @@
4993#define HW_LOCK_MAX_RESOURCE_VALUE 31 5017#define HW_LOCK_MAX_RESOURCE_VALUE 31
4994#define HW_LOCK_RESOURCE_8072_MDIO 0 5018#define HW_LOCK_RESOURCE_8072_MDIO 0
4995#define HW_LOCK_RESOURCE_GPIO 1 5019#define HW_LOCK_RESOURCE_GPIO 1
5020#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4996#define HW_LOCK_RESOURCE_SPIO 2 5021#define HW_LOCK_RESOURCE_SPIO 2
5022#define HW_LOCK_RESOURCE_UNDI 5
4997#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) 5023#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18)
4998#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) 5024#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31)
4999#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) 5025#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9)
@@ -5144,59 +5170,73 @@
5144#define GRCBASE_MISC_AEU GRCBASE_MISC 5170#define GRCBASE_MISC_AEU GRCBASE_MISC
5145 5171
5146 5172
5147/*the offset of the configuration space in the pci core register*/ 5173/* offset of configuration space in the pci core register */
5148#define PCICFG_OFFSET 0x2000 5174#define PCICFG_OFFSET 0x2000
5149#define PCICFG_VENDOR_ID_OFFSET 0x00 5175#define PCICFG_VENDOR_ID_OFFSET 0x00
5150#define PCICFG_DEVICE_ID_OFFSET 0x02 5176#define PCICFG_DEVICE_ID_OFFSET 0x02
5151#define PCICFG_COMMAND_OFFSET 0x04 5177#define PCICFG_COMMAND_OFFSET 0x04
5178#define PCICFG_COMMAND_IO_SPACE (1<<0)
5179#define PCICFG_COMMAND_MEM_SPACE (1<<1)
5180#define PCICFG_COMMAND_BUS_MASTER (1<<2)
5181#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
5182#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
5183#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
5184#define PCICFG_COMMAND_PERR_ENA (1<<6)
5185#define PCICFG_COMMAND_STEPPING (1<<7)
5186#define PCICFG_COMMAND_SERR_ENA (1<<8)
5187#define PCICFG_COMMAND_FAST_B2B (1<<9)
5188#define PCICFG_COMMAND_INT_DISABLE (1<<10)
5189#define PCICFG_COMMAND_RESERVED (0x1f<<11)
5152#define PCICFG_STATUS_OFFSET 0x06 5190#define PCICFG_STATUS_OFFSET 0x06
5153#define PCICFG_REVESION_ID 0x08 5191#define PCICFG_REVESION_ID 0x08
5154#define PCICFG_CACHE_LINE_SIZE 0x0c 5192#define PCICFG_CACHE_LINE_SIZE 0x0c
5155#define PCICFG_LATENCY_TIMER 0x0d 5193#define PCICFG_LATENCY_TIMER 0x0d
5156#define PCICFG_BAR_1_LOW 0x10 5194#define PCICFG_BAR_1_LOW 0x10
5157#define PCICFG_BAR_1_HIGH 0x14 5195#define PCICFG_BAR_1_HIGH 0x14
5158#define PCICFG_BAR_2_LOW 0x18 5196#define PCICFG_BAR_2_LOW 0x18
5159#define PCICFG_BAR_2_HIGH 0x1c 5197#define PCICFG_BAR_2_HIGH 0x1c
5160#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c 5198#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
5161#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e 5199#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
5162#define PCICFG_INT_LINE 0x3c 5200#define PCICFG_INT_LINE 0x3c
5163#define PCICFG_INT_PIN 0x3d 5201#define PCICFG_INT_PIN 0x3d
5164#define PCICFG_PM_CSR_OFFSET 0x4c 5202#define PCICFG_PM_CAPABILITY 0x48
5165#define PCICFG_GRC_ADDRESS 0x78 5203#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
5166#define PCICFG_GRC_DATA 0x80 5204#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
5205#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
5206#define PCICFG_PM_CAPABILITY_DSI (1<<21)
5207#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
5208#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
5209#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
5210#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
5211#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
5212#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
5213#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
5214#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
5215#define PCICFG_PM_CSR_OFFSET 0x4c
5216#define PCICFG_PM_CSR_STATE (0x3<<0)
5217#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
5218#define PCICFG_PM_CSR_PME_STATUS (1<<15)
5219#define PCICFG_GRC_ADDRESS 0x78
5220#define PCICFG_GRC_DATA 0x80
5167#define PCICFG_DEVICE_CONTROL 0xb4 5221#define PCICFG_DEVICE_CONTROL 0xb4
5168#define PCICFG_LINK_CONTROL 0xbc 5222#define PCICFG_LINK_CONTROL 0xbc
5169 5223
5170#define PCICFG_COMMAND_IO_SPACE (1<<0)
5171#define PCICFG_COMMAND_MEM_SPACE (1<<1)
5172#define PCICFG_COMMAND_BUS_MASTER (1<<2)
5173#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
5174#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
5175#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
5176#define PCICFG_COMMAND_PERR_ENA (1<<6)
5177#define PCICFG_COMMAND_STEPPING (1<<7)
5178#define PCICFG_COMMAND_SERR_ENA (1<<8)
5179#define PCICFG_COMMAND_FAST_B2B (1<<9)
5180#define PCICFG_COMMAND_INT_DISABLE (1<<10)
5181#define PCICFG_COMMAND_RESERVED (0x1f<<11)
5182
5183#define PCICFG_PM_CSR_STATE (0x3<<0)
5184#define PCICFG_PM_CSR_PME_STATUS (1<<15)
5185 5224
5186#define BAR_USTRORM_INTMEM 0x400000 5225#define BAR_USTRORM_INTMEM 0x400000
5187#define BAR_CSTRORM_INTMEM 0x410000 5226#define BAR_CSTRORM_INTMEM 0x410000
5188#define BAR_XSTRORM_INTMEM 0x420000 5227#define BAR_XSTRORM_INTMEM 0x420000
5189#define BAR_TSTRORM_INTMEM 0x430000 5228#define BAR_TSTRORM_INTMEM 0x430000
5190 5229
5230/* for accessing the IGU in case of status block ACK */
5191#define BAR_IGU_INTMEM 0x440000 5231#define BAR_IGU_INTMEM 0x440000
5192 5232
5193#define BAR_DOORBELL_OFFSET 0x800000 5233#define BAR_DOORBELL_OFFSET 0x800000
5194 5234
5195#define BAR_ME_REGISTER 0x450000 5235#define BAR_ME_REGISTER 0x450000
5196 5236
5197 5237/* config_2 offset */
5198#define GRC_CONFIG_2_SIZE_REG 0x408 /* config_2 offset */ 5238#define GRC_CONFIG_2_SIZE_REG 0x408
5199#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0) 5239#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
5200#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0) 5240#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
5201#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0) 5241#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
5202#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0) 5242#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
@@ -5213,11 +5253,11 @@
5213#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0) 5253#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
5214#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0) 5254#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
5215#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0) 5255#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
5216#define PCI_CONFIG_2_BAR1_64ENA (1L<<4) 5256#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
5217#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5) 5257#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
5218#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6) 5258#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
5219#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7) 5259#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
5220#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8) 5260#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
5221#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8) 5261#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
5222#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8) 5262#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
5223#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8) 5263#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
@@ -5234,46 +5274,44 @@
5234#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8) 5274#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
5235#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8) 5275#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
5236#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8) 5276#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
5237#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16) 5277#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
5238#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17) 5278#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
5239 5279
5240/* config_3 offset */ 5280/* config_3 offset */
5241#define GRC_CONFIG_3_SIZE_REG (0x40c) 5281#define GRC_CONFIG_3_SIZE_REG 0x40c
5242#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0) 5282#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
5243#define PCI_CONFIG_3_FORCE_PME (1L<<24) 5283#define PCI_CONFIG_3_FORCE_PME (1L<<24)
5244#define PCI_CONFIG_3_PME_STATUS (1L<<25) 5284#define PCI_CONFIG_3_PME_STATUS (1L<<25)
5245#define PCI_CONFIG_3_PME_ENABLE (1L<<26) 5285#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
5246#define PCI_CONFIG_3_PM_STATE (0x3L<<27) 5286#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
5247#define PCI_CONFIG_3_VAUX_PRESET (1L<<30) 5287#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
5248#define PCI_CONFIG_3_PCI_POWER (1L<<31) 5288#define PCI_CONFIG_3_PCI_POWER (1L<<31)
5249
5250/* config_2 offset */
5251#define GRC_CONFIG_2_SIZE_REG 0x408
5252 5289
5253#define GRC_BAR2_CONFIG 0x4e0 5290#define GRC_BAR2_CONFIG 0x4e0
5254#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0) 5291#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
5255#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0) 5292#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
5256#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0) 5293#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
5257#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0) 5294#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
5258#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0) 5295#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
5259#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0) 5296#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
5260#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0) 5297#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
5261#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0) 5298#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
5262#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0) 5299#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
5263#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0) 5300#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
5264#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0) 5301#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
5265#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0) 5302#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
5266#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0) 5303#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
5267#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0) 5304#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
5268#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0) 5305#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
5269#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0) 5306#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
5270#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0) 5307#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
5271#define PCI_CONFIG_2_BAR2_64ENA (1L<<4) 5308#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
5309
5310#define PCI_PM_DATA_A 0x410
5311#define PCI_PM_DATA_B 0x414
5312#define PCI_ID_VAL1 0x434
5313#define PCI_ID_VAL2 0x438
5272 5314
5273#define PCI_PM_DATA_A (0x410)
5274#define PCI_PM_DATA_B (0x414)
5275#define PCI_ID_VAL1 (0x434)
5276#define PCI_ID_VAL2 (0x438)
5277 5315
5278#define MDIO_REG_BANK_CL73_IEEEB0 0x0 5316#define MDIO_REG_BANK_CL73_IEEEB0 0x0
5279#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 5317#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
@@ -5522,6 +5560,8 @@ Theotherbitsarereservedandshouldbezero*/
5522#define MDIO_PMA_REG_GEN_CTRL 0xca10 5560#define MDIO_PMA_REG_GEN_CTRL 0xca10
5523#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188 5561#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP 0x0188
5524#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a 5562#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET 0x018a
5563#define MDIO_PMA_REG_M8051_MSGIN_REG 0xca12
5564#define MDIO_PMA_REG_M8051_MSGOUT_REG 0xca13
5525#define MDIO_PMA_REG_ROM_VER1 0xca19 5565#define MDIO_PMA_REG_ROM_VER1 0xca19
5526#define MDIO_PMA_REG_ROM_VER2 0xca1a 5566#define MDIO_PMA_REG_ROM_VER2 0xca1a
5527#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b 5567#define MDIO_PMA_REG_EDC_FFE_MAIN 0xca1b
@@ -5576,7 +5616,8 @@ Theotherbitsarereservedandshouldbezero*/
5576#define MDIO_AN_REG_LINK_STATUS 0x8304 5616#define MDIO_AN_REG_LINK_STATUS 0x8304
5577#define MDIO_AN_REG_CL37_CL73 0x8370 5617#define MDIO_AN_REG_CL37_CL73 0x8370
5578#define MDIO_AN_REG_CL37_AN 0xffe0 5618#define MDIO_AN_REG_CL37_AN 0xffe0
5579#define MDIO_AN_REG_CL37_FD 0xffe4 5619#define MDIO_AN_REG_CL37_FC_LD 0xffe4
5620#define MDIO_AN_REG_CL37_FC_LP 0xffe5
5580 5621
5581 5622
5582#define IGU_FUNC_BASE 0x0400 5623#define IGU_FUNC_BASE 0x0400
@@ -5600,4 +5641,13 @@ Theotherbitsarereservedandshouldbezero*/
5600#define IGU_INT_NOP 2 5641#define IGU_INT_NOP 2
5601#define IGU_INT_NOP2 3 5642#define IGU_INT_NOP2 3
5602 5643
5644#define COMMAND_REG_INT_ACK 0x0
5645#define COMMAND_REG_PROD_UPD 0x4
5646#define COMMAND_REG_ATTN_BITS_UPD 0x8
5647#define COMMAND_REG_ATTN_BITS_SET 0xc
5648#define COMMAND_REG_ATTN_BITS_CLR 0x10
5649#define COMMAND_REG_COALESCE_NOW 0x14
5650#define COMMAND_REG_SIMD_MASK 0x18
5651#define COMMAND_REG_SIMD_NOMASK 0x1c
5652
5603 5653
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 18354817173c..4a10b5624f72 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -308,9 +308,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
308 entry->msi_attrib.masked); 308 entry->msi_attrib.masked);
309 309
310 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); 310 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
311 control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); 311 control &= ~PCI_MSI_FLAGS_QSIZE;
312 if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked) 312 control |= PCI_MSI_FLAGS_ENABLE;
313 control |= PCI_MSI_FLAGS_ENABLE;
314 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); 313 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
315} 314}
316 315
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0a3d856833fc..c9884bba22de 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1060,7 +1060,7 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1060 * The caller must verify that the device is capable of generating PME# before 1060 * The caller must verify that the device is capable of generating PME# before
1061 * calling this function with @enable equal to 'true'. 1061 * calling this function with @enable equal to 'true'.
1062 */ 1062 */
1063static void pci_pme_active(struct pci_dev *dev, bool enable) 1063void pci_pme_active(struct pci_dev *dev, bool enable)
1064{ 1064{
1065 u16 pmcsr; 1065 u16 pmcsr;
1066 1066
@@ -1941,6 +1941,7 @@ EXPORT_SYMBOL(pci_set_power_state);
1941EXPORT_SYMBOL(pci_save_state); 1941EXPORT_SYMBOL(pci_save_state);
1942EXPORT_SYMBOL(pci_restore_state); 1942EXPORT_SYMBOL(pci_restore_state);
1943EXPORT_SYMBOL(pci_pme_capable); 1943EXPORT_SYMBOL(pci_pme_capable);
1944EXPORT_SYMBOL(pci_pme_active);
1944EXPORT_SYMBOL(pci_enable_wake); 1945EXPORT_SYMBOL(pci_enable_wake);
1945EXPORT_SYMBOL(pci_target_state); 1946EXPORT_SYMBOL(pci_target_state);
1946EXPORT_SYMBOL(pci_prepare_to_sleep); 1947EXPORT_SYMBOL(pci_prepare_to_sleep);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7098dfb07449..a04498d390c8 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -52,27 +52,49 @@ EXPORT_SYMBOL(no_pci_devices);
52 * Some platforms allow access to legacy I/O port and ISA memory space on 52 * Some platforms allow access to legacy I/O port and ISA memory space on
53 * a per-bus basis. This routine creates the files and ties them into 53 * a per-bus basis. This routine creates the files and ties them into
54 * their associated read, write and mmap files from pci-sysfs.c 54 * their associated read, write and mmap files from pci-sysfs.c
55 *
56 * On error unwind, but don't propogate the error to the caller
57 * as it is ok to set up the PCI bus without these files.
55 */ 58 */
56static void pci_create_legacy_files(struct pci_bus *b) 59static void pci_create_legacy_files(struct pci_bus *b)
57{ 60{
61 int error;
62
58 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, 63 b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
59 GFP_ATOMIC); 64 GFP_ATOMIC);
60 if (b->legacy_io) { 65 if (!b->legacy_io)
61 b->legacy_io->attr.name = "legacy_io"; 66 goto kzalloc_err;
62 b->legacy_io->size = 0xffff; 67
63 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; 68 b->legacy_io->attr.name = "legacy_io";
64 b->legacy_io->read = pci_read_legacy_io; 69 b->legacy_io->size = 0xffff;
65 b->legacy_io->write = pci_write_legacy_io; 70 b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
66 device_create_bin_file(&b->dev, b->legacy_io); 71 b->legacy_io->read = pci_read_legacy_io;
67 72 b->legacy_io->write = pci_write_legacy_io;
68 /* Allocated above after the legacy_io struct */ 73 error = device_create_bin_file(&b->dev, b->legacy_io);
69 b->legacy_mem = b->legacy_io + 1; 74 if (error)
70 b->legacy_mem->attr.name = "legacy_mem"; 75 goto legacy_io_err;
71 b->legacy_mem->size = 1024*1024; 76
72 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; 77 /* Allocated above after the legacy_io struct */
73 b->legacy_mem->mmap = pci_mmap_legacy_mem; 78 b->legacy_mem = b->legacy_io + 1;
74 device_create_bin_file(&b->dev, b->legacy_mem); 79 b->legacy_mem->attr.name = "legacy_mem";
75 } 80 b->legacy_mem->size = 1024*1024;
81 b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
82 b->legacy_mem->mmap = pci_mmap_legacy_mem;
83 error = device_create_bin_file(&b->dev, b->legacy_mem);
84 if (error)
85 goto legacy_mem_err;
86
87 return;
88
89legacy_mem_err:
90 device_remove_bin_file(&b->dev, b->legacy_io);
91legacy_io_err:
92 kfree(b->legacy_io);
93 b->legacy_io = NULL;
94kzalloc_err:
95 printk(KERN_WARNING "pci: warning: could not create legacy I/O port "
96 "and ISA memory resources to sysfs\n");
97 return;
76} 98}
77 99
78void pci_remove_legacy_files(struct pci_bus *b) 100void pci_remove_legacy_files(struct pci_bus *b)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 0fb365074288..9236e7f869c8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1756,9 +1756,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c
1756 */ 1756 */
1757static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev) 1757static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
1758{ 1758{
1759 /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */ 1759 /*
1760 * Only disable the VPD capability for 5706, 5706S, 5708,
1761 * 5708S and 5709 rev. A
1762 */
1760 if ((dev->device == PCI_DEVICE_ID_NX2_5706) || 1763 if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
1764 (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
1761 (dev->device == PCI_DEVICE_ID_NX2_5708) || 1765 (dev->device == PCI_DEVICE_ID_NX2_5708) ||
1766 (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
1762 ((dev->device == PCI_DEVICE_ID_NX2_5709) && 1767 ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
1763 (dev->revision & 0xf0) == 0x0)) { 1768 (dev->revision & 0xf0) == 0x0)) {
1764 if (dev->vpd) 1769 if (dev->vpd)
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index a8771ffc61e8..e07b5c51ec5b 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -23,12 +23,57 @@
23 23
24static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 24static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{ 25{
26 skt->irq = IRQ_GPIO(GPIO_NR_PALMTX_PCMCIA_READY); 26 int ret;
27
28 ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER1, "PCMCIA PWR1");
29 if (ret)
30 goto err1;
31 ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER1, 0);
32 if (ret)
33 goto err2;
34
35 ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_POWER2, "PCMCIA PWR2");
36 if (ret)
37 goto err2;
38 ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_POWER2, 0);
39 if (ret)
40 goto err3;
41
42 ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_RESET, "PCMCIA RST");
43 if (ret)
44 goto err3;
45 ret = gpio_direction_output(GPIO_NR_PALMTX_PCMCIA_RESET, 1);
46 if (ret)
47 goto err4;
48
49 ret = gpio_request(GPIO_NR_PALMTX_PCMCIA_READY, "PCMCIA RDY");
50 if (ret)
51 goto err4;
52 ret = gpio_direction_input(GPIO_NR_PALMTX_PCMCIA_READY);
53 if (ret)
54 goto err5;
55
56 skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
27 return 0; 57 return 0;
58
59err5:
60 gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
61err4:
62 gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
63err3:
64 gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
65err2:
66 gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
67err1:
68 return ret;
28} 69}
29 70
30static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 71static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
31{ 72{
73 gpio_free(GPIO_NR_PALMTX_PCMCIA_READY);
74 gpio_free(GPIO_NR_PALMTX_PCMCIA_RESET);
75 gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER2);
76 gpio_free(GPIO_NR_PALMTX_PCMCIA_POWER1);
32} 77}
33 78
34static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt, 79static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
@@ -109,7 +154,7 @@ static void __exit palmtx_pcmcia_exit(void)
109 platform_device_unregister(palmtx_pcmcia_device); 154 platform_device_unregister(palmtx_pcmcia_device);
110} 155}
111 156
112fs_initcall(palmtx_pcmcia_init); 157module_init(palmtx_pcmcia_init);
113module_exit(palmtx_pcmcia_exit); 158module_exit(palmtx_pcmcia_exit);
114 159
115MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); 160MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 856cc1af40df..35dcc06eb3e2 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/rtc.h> 15#include <linux/rtc.h>
16#include <linux/smp_lock.h>
17#include "rtc-core.h" 16#include "rtc-core.h"
18 17
19static dev_t rtc_devt; 18static dev_t rtc_devt;
@@ -27,11 +26,8 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
27 struct rtc_device, char_dev); 26 struct rtc_device, char_dev);
28 const struct rtc_class_ops *ops = rtc->ops; 27 const struct rtc_class_ops *ops = rtc->ops;
29 28
30 lock_kernel(); 29 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
31 if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags)) { 30 return -EBUSY;
32 err = -EBUSY;
33 goto out;
34 }
35 31
36 file->private_data = rtc; 32 file->private_data = rtc;
37 33
@@ -41,13 +37,11 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
41 rtc->irq_data = 0; 37 rtc->irq_data = 0;
42 spin_unlock_irq(&rtc->irq_lock); 38 spin_unlock_irq(&rtc->irq_lock);
43 39
44 goto out; 40 return 0;
45 } 41 }
46 42
47 /* something has gone wrong */ 43 /* something has gone wrong */
48 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); 44 clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);
49out:
50 unlock_kernel();
51 return err; 45 return err;
52} 46}
53 47
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index fbb90b1e4098..a81adab6e515 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -482,7 +482,7 @@ isl1208_sysfs_register(struct device *dev)
482static int 482static int
483isl1208_sysfs_unregister(struct device *dev) 483isl1208_sysfs_unregister(struct device *dev)
484{ 484{
485 device_remove_file(dev, &dev_attr_atrim); 485 device_remove_file(dev, &dev_attr_dtrim);
486 device_remove_file(dev, &dev_attr_atrim); 486 device_remove_file(dev, &dev_attr_atrim);
487 device_remove_file(dev, &dev_attr_usr); 487 device_remove_file(dev, &dev_attr_usr);
488 488
diff --git a/drivers/sbus/sbus.c b/drivers/sbus/sbus.c
index 73a86d09bba8..9c129248466c 100644
--- a/drivers/sbus/sbus.c
+++ b/drivers/sbus/sbus.c
@@ -7,13 +7,13 @@
7#include <linux/slab.h> 7#include <linux/slab.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/of_device.h>
10 11
11#include <asm/system.h> 12#include <asm/system.h>
12#include <asm/sbus.h> 13#include <asm/sbus.h>
13#include <asm/dma.h> 14#include <asm/dma.h>
14#include <asm/oplib.h> 15#include <asm/oplib.h>
15#include <asm/prom.h> 16#include <asm/prom.h>
16#include <asm/of_device.h>
17#include <asm/bpp.h> 17#include <asm/bpp.h>
18#include <asm/irq.h> 18#include <asm/irq.h>
19 19
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fcdd73f25625..994da56fffed 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -680,7 +680,7 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
680 680
681} 681}
682 682
683const struct scsi_dh_devlist alua_dev_list[] = { 683static const struct scsi_dh_devlist alua_dev_list[] = {
684 {"HP", "MSA VOLUME" }, 684 {"HP", "MSA VOLUME" },
685 {"HP", "HSV101" }, 685 {"HP", "HSV101" },
686 {"HP", "HSV111" }, 686 {"HP", "HSV111" },
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index aa46b131b20e..b9d23e9e9a44 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -562,7 +562,7 @@ done:
562 return result; 562 return result;
563} 563}
564 564
565const struct scsi_dh_devlist clariion_dev_list[] = { 565static const struct scsi_dh_devlist clariion_dev_list[] = {
566 {"DGC", "RAID"}, 566 {"DGC", "RAID"},
567 {"DGC", "DISK"}, 567 {"DGC", "DISK"},
568 {"DGC", "VRAID"}, 568 {"DGC", "VRAID"},
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 9c7a1f8ebb72..a6a4ef3ad51c 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -282,7 +282,7 @@ static int hp_sw_activate(struct scsi_device *sdev)
282 return ret; 282 return ret;
283} 283}
284 284
285const struct scsi_dh_devlist hp_sw_dh_data_list[] = { 285static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
286 {"COMPAQ", "MSA1000 VOLUME"}, 286 {"COMPAQ", "MSA1000 VOLUME"},
287 {"COMPAQ", "HSV110"}, 287 {"COMPAQ", "HSV110"},
288 {"HP", "HSV100"}, 288 {"HP", "HSV100"},
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b093a501f8ae..e7c7b4ebc1fe 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -574,7 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
574 return SCSI_RETURN_NOT_HANDLED; 574 return SCSI_RETURN_NOT_HANDLED;
575} 575}
576 576
577const struct scsi_dh_devlist rdac_dev_list[] = { 577static const struct scsi_dh_devlist rdac_dev_list[] = {
578 {"IBM", "1722"}, 578 {"IBM", "1722"},
579 {"IBM", "1724"}, 579 {"IBM", "1724"},
580 {"IBM", "1726"}, 580 {"IBM", "1726"},
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 3b4a14e355c1..77cb34270fc1 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -449,6 +449,7 @@ config SERIAL_CLPS711X_CONSOLE
449config SERIAL_SAMSUNG 449config SERIAL_SAMSUNG
450 tristate "Samsung SoC serial support" 450 tristate "Samsung SoC serial support"
451 depends on ARM && PLAT_S3C24XX 451 depends on ARM && PLAT_S3C24XX
452 select SERIAL_CORE
452 help 453 help
453 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs, 454 Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
454 providing /dev/ttySAC0, 1 and 2 (note, some machines may not 455 providing /dev/ttySAC0, 1 and 2 (note, some machines may not
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index aeeec5588afd..e41766d08035 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -17,11 +17,11 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/of_device.h>
20 21
21#include <asm/hypervisor.h> 22#include <asm/hypervisor.h>
22#include <asm/spitfire.h> 23#include <asm/spitfire.h>
23#include <asm/prom.h> 24#include <asm/prom.h>
24#include <asm/of_device.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26 26
27#if defined(CONFIG_MAGIC_SYSRQ) 27#if defined(CONFIG_MAGIC_SYSRQ)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 15ee497e1c78..29b4458abf74 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -32,11 +32,11 @@
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/delay.h> 33#include <linux/delay.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/of_device.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
38#include <asm/prom.h> 39#include <asm/prom.h>
39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index e24e68235088..a378464f9292 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -35,11 +35,11 @@
35#include <linux/serial_reg.h> 35#include <linux/serial_reg.h>
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/of_device.h>
38 39
39#include <asm/io.h> 40#include <asm/io.h>
40#include <asm/irq.h> 41#include <asm/irq.h>
41#include <asm/prom.h> 42#include <asm/prom.h>
42#include <asm/of_device.h>
43 43
44#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 44#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
45#define SUPPORT_SYSRQ 45#define SUPPORT_SYSRQ
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 0f3d69b86d67..3cb4c8aee13f 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -32,11 +32,11 @@
32#include <linux/serio.h> 32#include <linux/serio.h>
33#endif 33#endif
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/of_device.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/irq.h> 38#include <asm/irq.h>
38#include <asm/prom.h> 39#include <asm/prom.h>
39#include <asm/of_device.h>
40 40
41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 41#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
42#define SUPPORT_SYSRQ 42#define SUPPORT_SYSRQ
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 964124b60db2..75e86865234c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -226,10 +226,11 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
226 * Companion function to spi_alloc_device. Devices allocated with 226 * Companion function to spi_alloc_device. Devices allocated with
227 * spi_alloc_device can be added onto the spi bus with this function. 227 * spi_alloc_device can be added onto the spi bus with this function.
228 * 228 *
229 * Returns 0 on success; non-zero on failure 229 * Returns 0 on success; negative errno on failure
230 */ 230 */
231int spi_add_device(struct spi_device *spi) 231int spi_add_device(struct spi_device *spi)
232{ 232{
233 static DEFINE_MUTEX(spi_add_lock);
233 struct device *dev = spi->master->dev.parent; 234 struct device *dev = spi->master->dev.parent;
234 int status; 235 int status;
235 236
@@ -246,26 +247,43 @@ int spi_add_device(struct spi_device *spi)
246 "%s.%u", spi->master->dev.bus_id, 247 "%s.%u", spi->master->dev.bus_id,
247 spi->chip_select); 248 spi->chip_select);
248 249
249 /* drivers may modify this initial i/o setup */ 250
251 /* We need to make sure there's no other device with this
252 * chipselect **BEFORE** we call setup(), else we'll trash
253 * its configuration. Lock against concurrent add() calls.
254 */
255 mutex_lock(&spi_add_lock);
256
257 if (bus_find_device_by_name(&spi_bus_type, NULL, spi->dev.bus_id)
258 != NULL) {
259 dev_err(dev, "chipselect %d already in use\n",
260 spi->chip_select);
261 status = -EBUSY;
262 goto done;
263 }
264
265 /* Drivers may modify this initial i/o setup, but will
266 * normally rely on the device being setup. Devices
267 * using SPI_CS_HIGH can't coexist well otherwise...
268 */
250 status = spi->master->setup(spi); 269 status = spi->master->setup(spi);
251 if (status < 0) { 270 if (status < 0) {
252 dev_err(dev, "can't %s %s, status %d\n", 271 dev_err(dev, "can't %s %s, status %d\n",
253 "setup", spi->dev.bus_id, status); 272 "setup", spi->dev.bus_id, status);
254 return status; 273 goto done;
255 } 274 }
256 275
257 /* driver core catches callers that misbehave by defining 276 /* Device may be bound to an active driver when this returns */
258 * devices that already exist.
259 */
260 status = device_add(&spi->dev); 277 status = device_add(&spi->dev);
261 if (status < 0) { 278 if (status < 0)
262 dev_err(dev, "can't %s %s, status %d\n", 279 dev_err(dev, "can't %s %s, status %d\n",
263 "add", spi->dev.bus_id, status); 280 "add", spi->dev.bus_id, status);
264 return status; 281 else
265 } 282 dev_dbg(dev, "registered child %s\n", spi->dev.bus_id);
266 283
267 dev_dbg(dev, "registered child %s\n", spi->dev.bus_id); 284done:
268 return 0; 285 mutex_unlock(&spi_add_lock);
286 return status;
269} 287}
270EXPORT_SYMBOL_GPL(spi_add_device); 288EXPORT_SYMBOL_GPL(spi_add_device);
271 289
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823cdf62a..bcefbddeba50 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@ config USB
95 95
96source "drivers/usb/core/Kconfig" 96source "drivers/usb/core/Kconfig"
97 97
98source "drivers/usb/mon/Kconfig"
99
98source "drivers/usb/host/Kconfig" 100source "drivers/usb/host/Kconfig"
99 101
102source "drivers/usb/musb/Kconfig"
103
100source "drivers/usb/class/Kconfig" 104source "drivers/usb/class/Kconfig"
101 105
102source "drivers/usb/storage/Kconfig" 106source "drivers/usb/storage/Kconfig"
103 107
104source "drivers/usb/image/Kconfig" 108source "drivers/usb/image/Kconfig"
105 109
106source "drivers/usb/mon/Kconfig"
107
108comment "USB port drivers" 110comment "USB port drivers"
109 depends on USB 111 depends on USB
110 112
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd0d77c..9aea43a8c4ad 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
602 offd = le32_to_cpu(buf[offb++]); 602 offd = le32_to_cpu(buf[offb++]);
603 if (offd >= size) { 603 if (offd >= size) {
604 if (printk_ratelimit()) 604 if (printk_ratelimit())
605 usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n", 605 usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
606 offd, cm); 606 offd, cm);
607 ret = -EIO; 607 ret = -EIO;
608 goto cleanup; 608 goto cleanup;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b1871f23..efc4373ededb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
51 */ 51 */
52 52
53#undef DEBUG 53#undef DEBUG
54#undef VERBOSE_DEBUG
54 55
55#include <linux/kernel.h> 56#include <linux/kernel.h>
56#include <linux/errno.h> 57#include <linux/errno.h>
@@ -70,6 +71,9 @@
70 71
71#include "cdc-acm.h" 72#include "cdc-acm.h"
72 73
74
75#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */
76
73/* 77/*
74 * Version Information 78 * Version Information
75 */ 79 */
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex);
85 89
86#define ACM_READY(acm) (acm && acm->dev && acm->used) 90#define ACM_READY(acm) (acm && acm->dev && acm->used)
87 91
92#ifdef VERBOSE_DEBUG
93#define verbose 1
94#else
95#define verbose 0
96#endif
97
88/* 98/*
89 * Functions for ACM control messages. 99 * Functions for ACM control messages.
90 */ 100 */
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm)
136static int acm_wb_is_avail(struct acm *acm) 146static int acm_wb_is_avail(struct acm *acm)
137{ 147{
138 int i, n; 148 int i, n;
149 unsigned long flags;
139 150
140 n = ACM_NW; 151 n = ACM_NW;
152 spin_lock_irqsave(&acm->write_lock, flags);
141 for (i = 0; i < ACM_NW; i++) { 153 for (i = 0; i < ACM_NW; i++) {
142 n -= acm->wb[i].use; 154 n -= acm->wb[i].use;
143 } 155 }
156 spin_unlock_irqrestore(&acm->write_lock, flags);
144 return n; 157 return n;
145} 158}
146 159
147static inline int acm_wb_is_used(struct acm *acm, int wbn)
148{
149 return acm->wb[wbn].use;
150}
151
152/* 160/*
153 * Finish write. 161 * Finish write.
154 */ 162 */
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
157 unsigned long flags; 165 unsigned long flags;
158 166
159 spin_lock_irqsave(&acm->write_lock, flags); 167 spin_lock_irqsave(&acm->write_lock, flags);
160 acm->write_ready = 1;
161 wb->use = 0; 168 wb->use = 0;
162 acm->transmitting--; 169 acm->transmitting--;
163 spin_unlock_irqrestore(&acm->write_lock, flags); 170 spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
190static int acm_write_start(struct acm *acm, int wbn) 197static int acm_write_start(struct acm *acm, int wbn)
191{ 198{
192 unsigned long flags; 199 unsigned long flags;
193 struct acm_wb *wb; 200 struct acm_wb *wb = &acm->wb[wbn];
194 int rc; 201 int rc;
195 202
196 spin_lock_irqsave(&acm->write_lock, flags); 203 spin_lock_irqsave(&acm->write_lock, flags);
197 if (!acm->dev) { 204 if (!acm->dev) {
205 wb->use = 0;
198 spin_unlock_irqrestore(&acm->write_lock, flags); 206 spin_unlock_irqrestore(&acm->write_lock, flags);
199 return -ENODEV; 207 return -ENODEV;
200 } 208 }
201 209
202 if (!acm->write_ready) {
203 spin_unlock_irqrestore(&acm->write_lock, flags);
204 return 0; /* A white lie */
205 }
206
207 wb = &acm->wb[wbn];
208 if(acm_wb_is_avail(acm) <= 1)
209 acm->write_ready = 0;
210
211 dbg("%s susp_count: %d", __func__, acm->susp_count); 210 dbg("%s susp_count: %d", __func__, acm->susp_count);
212 if (acm->susp_count) { 211 if (acm->susp_count) {
213 acm->old_ready = acm->write_ready;
214 acm->delayed_wb = wb; 212 acm->delayed_wb = wb;
215 acm->write_ready = 0;
216 schedule_work(&acm->waker); 213 schedule_work(&acm->waker);
217 spin_unlock_irqrestore(&acm->write_lock, flags); 214 spin_unlock_irqrestore(&acm->write_lock, flags);
218 return 0; /* A white lie */ 215 return 0; /* A white lie */
219 } 216 }
220 usb_mark_last_busy(acm->dev); 217 usb_mark_last_busy(acm->dev);
221 218
222 if (!acm_wb_is_used(acm, wbn)) {
223 spin_unlock_irqrestore(&acm->write_lock, flags);
224 return 0;
225 }
226
227 rc = acm_start_wb(acm, wb); 219 rc = acm_start_wb(acm, wb);
228 spin_unlock_irqrestore(&acm->write_lock, flags); 220 spin_unlock_irqrestore(&acm->write_lock, flags);
229 221
@@ -488,22 +480,28 @@ urbs:
488/* data interface wrote those outgoing bytes */ 480/* data interface wrote those outgoing bytes */
489static void acm_write_bulk(struct urb *urb) 481static void acm_write_bulk(struct urb *urb)
490{ 482{
491 struct acm *acm;
492 struct acm_wb *wb = urb->context; 483 struct acm_wb *wb = urb->context;
484 struct acm *acm = wb->instance;
493 485
494 dbg("Entering acm_write_bulk with status %d", urb->status); 486 if (verbose || urb->status
487 || (urb->actual_length != urb->transfer_buffer_length))
488 dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
489 urb->actual_length,
490 urb->transfer_buffer_length,
491 urb->status);
495 492
496 acm = wb->instance;
497 acm_write_done(acm, wb); 493 acm_write_done(acm, wb);
498 if (ACM_READY(acm)) 494 if (ACM_READY(acm))
499 schedule_work(&acm->work); 495 schedule_work(&acm->work);
496 else
497 wake_up_interruptible(&acm->drain_wait);
500} 498}
501 499
502static void acm_softint(struct work_struct *work) 500static void acm_softint(struct work_struct *work)
503{ 501{
504 struct acm *acm = container_of(work, struct acm, work); 502 struct acm *acm = container_of(work, struct acm, work);
505 dbg("Entering acm_softint."); 503
506 504 dev_vdbg(&acm->data->dev, "tx work\n");
507 if (!ACM_READY(acm)) 505 if (!ACM_READY(acm))
508 return; 506 return;
509 tty_wakeup(acm->tty); 507 tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work)
512static void acm_waker(struct work_struct *waker) 510static void acm_waker(struct work_struct *waker)
513{ 511{
514 struct acm *acm = container_of(waker, struct acm, waker); 512 struct acm *acm = container_of(waker, struct acm, waker);
515 long flags;
516 int rv; 513 int rv;
517 514
518 rv = usb_autopm_get_interface(acm->control); 515 rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker)
524 acm_start_wb(acm, acm->delayed_wb); 521 acm_start_wb(acm, acm->delayed_wb);
525 acm->delayed_wb = NULL; 522 acm->delayed_wb = NULL;
526 } 523 }
527 spin_lock_irqsave(&acm->write_lock, flags);
528 acm->write_ready = acm->old_ready;
529 spin_unlock_irqrestore(&acm->write_lock, flags);
530 usb_autopm_put_interface(acm->control); 524 usb_autopm_put_interface(acm->control);
531} 525}
532 526
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm)
628 kfree(acm); 622 kfree(acm);
629} 623}
630 624
625static int acm_tty_chars_in_buffer(struct tty_struct *tty);
626
631static void acm_tty_close(struct tty_struct *tty, struct file *filp) 627static void acm_tty_close(struct tty_struct *tty, struct file *filp)
632{ 628{
633 struct acm *acm = tty->driver_data; 629 struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
642 if (acm->dev) { 638 if (acm->dev) {
643 usb_autopm_get_interface(acm->control); 639 usb_autopm_get_interface(acm->control);
644 acm_set_control(acm, acm->ctrlout = 0); 640 acm_set_control(acm, acm->ctrlout = 0);
641
642 /* try letting the last writes drain naturally */
643 wait_event_interruptible_timeout(acm->drain_wait,
644 (ACM_NW == acm_wb_is_avail(acm))
645 || !acm->dev,
646 ACM_CLOSE_TIMEOUT * HZ);
647
645 usb_kill_urb(acm->ctrlurb); 648 usb_kill_urb(acm->ctrlurb);
646 for (i = 0; i < ACM_NW; i++) 649 for (i = 0; i < ACM_NW; i++)
647 usb_kill_urb(acm->wb[i].urb); 650 usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
697 * Do not let the line discipline to know that we have a reserve, 700 * Do not let the line discipline to know that we have a reserve,
698 * or it might get too enthusiastic. 701 * or it might get too enthusiastic.
699 */ 702 */
700 return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0; 703 return acm_wb_is_avail(acm) ? acm->writesize : 0;
701} 704}
702 705
703static int acm_tty_chars_in_buffer(struct tty_struct *tty) 706static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@ skip_normal_probe:
1072 acm->urb_task.data = (unsigned long) acm; 1075 acm->urb_task.data = (unsigned long) acm;
1073 INIT_WORK(&acm->work, acm_softint); 1076 INIT_WORK(&acm->work, acm_softint);
1074 INIT_WORK(&acm->waker, acm_waker); 1077 INIT_WORK(&acm->waker, acm_waker);
1078 init_waitqueue_head(&acm->drain_wait);
1075 spin_lock_init(&acm->throttle_lock); 1079 spin_lock_init(&acm->throttle_lock);
1076 spin_lock_init(&acm->write_lock); 1080 spin_lock_init(&acm->write_lock);
1077 spin_lock_init(&acm->read_lock); 1081 spin_lock_init(&acm->read_lock);
1078 mutex_init(&acm->mutex); 1082 mutex_init(&acm->mutex);
1079 acm->write_ready = 1;
1080 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); 1083 acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
1081 1084
1082 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); 1085 buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@ skip_normal_probe:
1108 rcv->instance = acm; 1111 rcv->instance = acm;
1109 } 1112 }
1110 for (i = 0; i < num_rx_buf; i++) { 1113 for (i = 0; i < num_rx_buf; i++) {
1111 struct acm_rb *buf = &(acm->rb[i]); 1114 struct acm_rb *rb = &(acm->rb[i]);
1112 1115
1113 if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) { 1116 rb->base = usb_buffer_alloc(acm->dev, readsize,
1117 GFP_KERNEL, &rb->dma);
1118 if (!rb->base) {
1114 dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n"); 1119 dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
1115 goto alloc_fail7; 1120 goto alloc_fail7;
1116 } 1121 }
@@ -1172,6 +1177,7 @@ skip_countries:
1172 acm_set_line(acm, &acm->line); 1177 acm_set_line(acm, &acm->line);
1173 1178
1174 usb_driver_claim_interface(&acm_driver, data_interface, acm); 1179 usb_driver_claim_interface(&acm_driver, data_interface, acm);
1180 usb_set_intfdata(data_interface, acm);
1175 1181
1176 usb_get_intf(control_interface); 1182 usb_get_intf(control_interface);
1177 tty_register_device(acm_tty_driver, minor, &control_interface->dev); 1183 tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf)
1221 struct acm *acm = usb_get_intfdata(intf); 1227 struct acm *acm = usb_get_intfdata(intf);
1222 struct usb_device *usb_dev = interface_to_usbdev(intf); 1228 struct usb_device *usb_dev = interface_to_usbdev(intf);
1223 1229
1224 mutex_lock(&open_mutex); 1230 /* sibling interface is already cleaning up */
1225 if (!acm || !acm->dev) { 1231 if (!acm)
1226 mutex_unlock(&open_mutex);
1227 return; 1232 return;
1228 } 1233
1234 mutex_lock(&open_mutex);
1229 if (acm->country_codes){ 1235 if (acm->country_codes){
1230 device_remove_file(&acm->control->dev, 1236 device_remove_file(&acm->control->dev,
1231 &dev_attr_wCountryCodes); 1237 &dev_attr_wCountryCodes);
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaaab7c5..1f95e7aa1b66 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@ struct acm {
106 struct list_head spare_read_bufs; 106 struct list_head spare_read_bufs;
107 struct list_head filled_read_bufs; 107 struct list_head filled_read_bufs;
108 int write_used; /* number of non-empty write buffers */ 108 int write_used; /* number of non-empty write buffers */
109 int write_ready; /* write urb is not running */
110 int old_ready;
111 int processing; 109 int processing;
112 int transmitting; 110 int transmitting;
113 spinlock_t write_lock; 111 spinlock_t write_lock;
@@ -115,6 +113,7 @@ struct acm {
115 struct usb_cdc_line_coding line; /* bits, stop, parity */ 113 struct usb_cdc_line_coding line; /* bits, stop, parity */
116 struct work_struct work; /* work queue entry for line discipline waking up */ 114 struct work_struct work; /* work queue entry for line discipline waking up */
117 struct work_struct waker; 115 struct work_struct waker;
116 wait_queue_head_t drain_wait; /* close processing */
118 struct tasklet_struct urb_task; /* rx processing */ 117 struct tasklet_struct urb_task; /* rx processing */
119 spinlock_t throttle_lock; /* synchronize throtteling and read callback */ 118 spinlock_t throttle_lock; /* synchronize throtteling and read callback */
120 unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ 119 unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e14a5c5..2be37fe466f2 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -774,7 +774,6 @@ void usb_deregister(struct usb_driver *driver)
774} 774}
775EXPORT_SYMBOL_GPL(usb_deregister); 775EXPORT_SYMBOL_GPL(usb_deregister);
776 776
777
778/* Forced unbinding of a USB interface driver, either because 777/* Forced unbinding of a USB interface driver, either because
779 * it doesn't support pre_reset/post_reset/reset_resume or 778 * it doesn't support pre_reset/post_reset/reset_resume or
780 * because it doesn't support suspend/resume. 779 * because it doesn't support suspend/resume.
@@ -821,6 +820,8 @@ void usb_rebind_intf(struct usb_interface *intf)
821 dev_warn(&intf->dev, "rebind failed: %d\n", rc); 820 dev_warn(&intf->dev, "rebind failed: %d\n", rc);
822} 821}
823 822
823#ifdef CONFIG_PM
824
824#define DO_UNBIND 0 825#define DO_UNBIND 0
825#define DO_REBIND 1 826#define DO_REBIND 1
826 827
@@ -872,8 +873,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
872 } 873 }
873} 874}
874 875
875#ifdef CONFIG_PM
876
877/* Caller has locked udev's pm_mutex */ 876/* Caller has locked udev's pm_mutex */
878static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) 877static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
879{ 878{
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 586d6f1376cf..286b4431a097 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1091 continue; 1091 continue;
1092 dev_dbg(&dev->dev, "unregistering interface %s\n", 1092 dev_dbg(&dev->dev, "unregistering interface %s\n",
1093 dev_name(&interface->dev)); 1093 dev_name(&interface->dev));
1094 device_del(&interface->dev);
1095 usb_remove_sysfs_intf_files(interface); 1094 usb_remove_sysfs_intf_files(interface);
1095 device_del(&interface->dev);
1096 } 1096 }
1097 1097
1098 /* Now that the interfaces are unbound, nobody should 1098 /* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b1116a..acc95b2ac6f8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@ config USB_LH7A40X
284 default USB_GADGET 284 default USB_GADGET
285 select USB_GADGET_SELECTED 285 select USB_GADGET_SELECTED
286 286
287# built in ../musb along with host support
288config USB_GADGET_MUSB_HDRC
289 boolean "Inventra HDRC USB Peripheral (TI, ...)"
290 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
291 select USB_GADGET_DUALSPEED
292 select USB_GADGET_SELECTED
293 help
294 This OTG-capable silicon IP is used in dual designs including
295 the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
296
287config USB_GADGET_OMAP 297config USB_GADGET_OMAP
288 boolean "OMAP USB Device Controller" 298 boolean "OMAP USB Device Controller"
289 depends on ARCH_OMAP 299 depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406af9ee..7600a0c78753 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
542 req->req.context = dum; 542 req->req.context = dum;
543 req->req.complete = fifo_complete; 543 req->req.complete = fifo_complete;
544 544
545 list_add_tail(&req->queue, &ep->queue);
545 spin_unlock (&dum->lock); 546 spin_unlock (&dum->lock);
546 _req->actual = _req->length; 547 _req->actual = _req->length;
547 _req->status = 0; 548 _req->status = 0;
548 _req->complete (_ep, _req); 549 _req->complete (_ep, _req);
549 spin_lock (&dum->lock); 550 spin_lock (&dum->lock);
550 } 551 } else
551 list_add_tail (&req->queue, &ep->queue); 552 list_add_tail(&req->queue, &ep->queue);
552 spin_unlock_irqrestore (&dum->lock, flags); 553 spin_unlock_irqrestore (&dum->lock, flags);
553 554
554 /* real hardware would likely enable transfers here, in case 555 /* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf27895..5ee1590b8e9c 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@ struct f_acm {
47 u8 ctrl_id, data_id; 47 u8 ctrl_id, data_id;
48 u8 port_num; 48 u8 port_num;
49 49
50 struct usb_descriptor_header **fs_function; 50 u8 pending;
51
52 /* lock is mostly for pending and notify_req ... they get accessed
53 * by callbacks both from tty (open/close/break) under its spinlock,
54 * and notify_req.complete() which can't use that lock.
55 */
56 spinlock_t lock;
57
51 struct acm_ep_descs fs; 58 struct acm_ep_descs fs;
52 struct usb_descriptor_header **hs_function;
53 struct acm_ep_descs hs; 59 struct acm_ep_descs hs;
54 60
55 struct usb_ep *notify; 61 struct usb_ep *notify;
56 struct usb_endpoint_descriptor *notify_desc; 62 struct usb_endpoint_descriptor *notify_desc;
63 struct usb_request *notify_req;
57 64
58 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ 65 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
66
67 /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
59 u16 port_handshake_bits; 68 u16 port_handshake_bits;
60#define RS232_RTS (1 << 1) /* unused with full duplex */ 69#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
61#define RS232_DTR (1 << 0) /* host is ready for data r/w */ 70#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
71
72 /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
73 u16 serial_state;
74#define ACM_CTRL_OVERRUN (1 << 6)
75#define ACM_CTRL_PARITY (1 << 5)
76#define ACM_CTRL_FRAMING (1 << 4)
77#define ACM_CTRL_RI (1 << 3)
78#define ACM_CTRL_BRK (1 << 2)
79#define ACM_CTRL_DSR (1 << 1)
80#define ACM_CTRL_DCD (1 << 0)
62}; 81};
63 82
64static inline struct f_acm *func_to_acm(struct usb_function *f) 83static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f)
66 return container_of(f, struct f_acm, port.func); 85 return container_of(f, struct f_acm, port.func);
67} 86}
68 87
88static inline struct f_acm *port_to_acm(struct gserial *p)
89{
90 return container_of(p, struct f_acm, port);
91}
92
69/*-------------------------------------------------------------------------*/ 93/*-------------------------------------------------------------------------*/
70 94
71/* notification endpoint uses smallish and infrequent fixed-size messages */ 95/* notification endpoint uses smallish and infrequent fixed-size messages */
72 96
73#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */ 97#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
74#define GS_NOTIFY_MAXPACKET 8 98#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
75 99
76/* interface and class descriptors: */ 100/* interface and class descriptors: */
77 101
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
117 .bLength = sizeof(acm_descriptor), 141 .bLength = sizeof(acm_descriptor),
118 .bDescriptorType = USB_DT_CS_INTERFACE, 142 .bDescriptorType = USB_DT_CS_INTERFACE,
119 .bDescriptorSubType = USB_CDC_ACM_TYPE, 143 .bDescriptorSubType = USB_CDC_ACM_TYPE,
120 .bmCapabilities = (1 << 1), 144 .bmCapabilities = USB_CDC_CAP_LINE,
121}; 145};
122 146
123static struct usb_cdc_union_desc acm_union_desc __initdata = { 147static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
277 301
278 /* composite driver infrastructure handles everything except 302 /* composite driver infrastructure handles everything except
279 * CDC class messages; interface activation uses set_alt(). 303 * CDC class messages; interface activation uses set_alt().
304 *
305 * Note CDC spec table 4 lists the ACM request profile. It requires
306 * encapsulated command support ... we don't handle any, and respond
307 * to them by stalling. Options include get/set/clear comm features
308 * (not that useful) and SEND_BREAK.
280 */ 309 */
281 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { 310 switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
282 311
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
312 value = 0; 341 value = 0;
313 342
314 /* FIXME we should not allow data to flow until the 343 /* FIXME we should not allow data to flow until the
315 * host sets the RS232_DTR bit; and when it clears 344 * host sets the ACM_CTRL_DTR bit; and when it clears
316 * that bit, we should return to that no-flow state. 345 * that bit, we should return to that no-flow state.
317 */ 346 */
318 acm->port_handshake_bits = w_value; 347 acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
350 /* we know alt == 0, so this is an activation or a reset */ 379 /* we know alt == 0, so this is an activation or a reset */
351 380
352 if (intf == acm->ctrl_id) { 381 if (intf == acm->ctrl_id) {
353 /* REVISIT this may need more work when we start to
354 * send notifications ...
355 */
356 if (acm->notify->driver_data) { 382 if (acm->notify->driver_data) {
357 VDBG(cdev, "reset acm control interface %d\n", intf); 383 VDBG(cdev, "reset acm control interface %d\n", intf);
358 usb_ep_disable(acm->notify); 384 usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f)
397 423
398/*-------------------------------------------------------------------------*/ 424/*-------------------------------------------------------------------------*/
399 425
426/**
427 * acm_cdc_notify - issue CDC notification to host
428 * @acm: wraps host to be notified
429 * @type: notification type
430 * @value: Refer to cdc specs, wValue field.
431 * @data: data to be sent
432 * @length: size of data
433 * Context: irqs blocked, acm->lock held, acm_notify_req non-null
434 *
435 * Returns zero on sucess or a negative errno.
436 *
437 * See section 6.3.5 of the CDC 1.1 specification for information
438 * about the only notification we issue: SerialState change.
439 */
440static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
441 void *data, unsigned length)
442{
443 struct usb_ep *ep = acm->notify;
444 struct usb_request *req;
445 struct usb_cdc_notification *notify;
446 const unsigned len = sizeof(*notify) + length;
447 void *buf;
448 int status;
449
450 req = acm->notify_req;
451 acm->notify_req = NULL;
452 acm->pending = false;
453
454 req->length = len;
455 notify = req->buf;
456 buf = notify + 1;
457
458 notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
459 | USB_RECIP_INTERFACE;
460 notify->bNotificationType = type;
461 notify->wValue = cpu_to_le16(value);
462 notify->wIndex = cpu_to_le16(acm->ctrl_id);
463 notify->wLength = cpu_to_le16(length);
464 memcpy(buf, data, length);
465
466 status = usb_ep_queue(ep, req, GFP_ATOMIC);
467 if (status < 0) {
468 ERROR(acm->port.func.config->cdev,
469 "acm ttyGS%d can't notify serial state, %d\n",
470 acm->port_num, status);
471 acm->notify_req = req;
472 }
473
474 return status;
475}
476
477static int acm_notify_serial_state(struct f_acm *acm)
478{
479 struct usb_composite_dev *cdev = acm->port.func.config->cdev;
480 int status;
481
482 spin_lock(&acm->lock);
483 if (acm->notify_req) {
484 DBG(cdev, "acm ttyGS%d serial state %04x\n",
485 acm->port_num, acm->serial_state);
486 status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
487 0, &acm->serial_state, sizeof(acm->serial_state));
488 } else {
489 acm->pending = true;
490 status = 0;
491 }
492 spin_unlock(&acm->lock);
493 return status;
494}
495
496static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
497{
498 struct f_acm *acm = req->context;
499 u8 doit = false;
500
501 /* on this call path we do NOT hold the port spinlock,
502 * which is why ACM needs its own spinlock
503 */
504 spin_lock(&acm->lock);
505 if (req->status != -ESHUTDOWN)
506 doit = acm->pending;
507 acm->notify_req = req;
508 spin_unlock(&acm->lock);
509
510 if (doit)
511 acm_notify_serial_state(acm);
512}
513
514/* connect == the TTY link is open */
515
516static void acm_connect(struct gserial *port)
517{
518 struct f_acm *acm = port_to_acm(port);
519
520 acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
521 acm_notify_serial_state(acm);
522}
523
524static void acm_disconnect(struct gserial *port)
525{
526 struct f_acm *acm = port_to_acm(port);
527
528 acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
529 acm_notify_serial_state(acm);
530}
531
532static int acm_send_break(struct gserial *port, int duration)
533{
534 struct f_acm *acm = port_to_acm(port);
535 u16 state;
536
537 state = acm->serial_state;
538 state &= ~ACM_CTRL_BRK;
539 if (duration)
540 state |= ACM_CTRL_BRK;
541
542 acm->serial_state = state;
543 return acm_notify_serial_state(acm);
544}
545
546/*-------------------------------------------------------------------------*/
547
400/* ACM function driver setup/binding */ 548/* ACM function driver setup/binding */
401static int __init 549static int __init
402acm_bind(struct usb_configuration *c, struct usb_function *f) 550acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
445 acm->notify = ep; 593 acm->notify = ep;
446 ep->driver_data = cdev; /* claim */ 594 ep->driver_data = cdev; /* claim */
447 595
596 /* allocate notification */
597 acm->notify_req = gs_alloc_req(ep,
598 sizeof(struct usb_cdc_notification) + 2,
599 GFP_KERNEL);
600 if (!acm->notify_req)
601 goto fail;
602
603 acm->notify_req->complete = acm_cdc_notify_complete;
604 acm->notify_req->context = acm;
605
448 /* copy descriptors, and track endpoint copies */ 606 /* copy descriptors, and track endpoint copies */
449 f->descriptors = usb_copy_descriptors(acm_fs_function); 607 f->descriptors = usb_copy_descriptors(acm_fs_function);
608 if (!f->descriptors)
609 goto fail;
450 610
451 acm->fs.in = usb_find_endpoint(acm_fs_function, 611 acm->fs.in = usb_find_endpoint(acm_fs_function,
452 f->descriptors, &acm_fs_in_desc); 612 f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
478 f->hs_descriptors, &acm_hs_notify_desc); 638 f->hs_descriptors, &acm_hs_notify_desc);
479 } 639 }
480 640
481 /* FIXME provide a callback for triggering notifications */
482
483 DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", 641 DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
484 acm->port_num, 642 acm->port_num,
485 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", 643 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
488 return 0; 646 return 0;
489 647
490fail: 648fail:
649 if (acm->notify_req)
650 gs_free_req(acm->notify, acm->notify_req);
651
491 /* we might as well release our claims on endpoints */ 652 /* we might as well release our claims on endpoints */
492 if (acm->notify) 653 if (acm->notify)
493 acm->notify->driver_data = NULL; 654 acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@ fail:
504static void 665static void
505acm_unbind(struct usb_configuration *c, struct usb_function *f) 666acm_unbind(struct usb_configuration *c, struct usb_function *f)
506{ 667{
668 struct f_acm *acm = func_to_acm(f);
669
507 if (gadget_is_dualspeed(c->cdev->gadget)) 670 if (gadget_is_dualspeed(c->cdev->gadget))
508 usb_free_descriptors(f->hs_descriptors); 671 usb_free_descriptors(f->hs_descriptors);
509 usb_free_descriptors(f->descriptors); 672 usb_free_descriptors(f->descriptors);
510 kfree(func_to_acm(f)); 673 gs_free_req(acm->notify, acm->notify_req);
674 kfree(acm);
511} 675}
512 676
513/* Some controllers can't support CDC ACM ... */ 677/* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
571 if (!acm) 735 if (!acm)
572 return -ENOMEM; 736 return -ENOMEM;
573 737
738 spin_lock_init(&acm->lock);
739
574 acm->port_num = port_num; 740 acm->port_num = port_num;
575 741
742 acm->port.connect = acm_connect;
743 acm->port.disconnect = acm_disconnect;
744 acm->port.send_break = acm_send_break;
745
576 acm->port.func.name = "acm"; 746 acm->port.func.name = "acm";
577 acm->port.func.strings = acm_strings; 747 acm->port.func.strings = acm_strings;
578 /* descriptors are per-instance copies */ 748 /* descriptors are per-instance copies */
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d7693a..a2b5c092bda0 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@ struct f_ecm {
63 63
64 char ethaddr[14]; 64 char ethaddr[14];
65 65
66 struct usb_descriptor_header **fs_function;
67 struct ecm_ep_descs fs; 66 struct ecm_ep_descs fs;
68 struct usb_descriptor_header **hs_function;
69 struct ecm_ep_descs hs; 67 struct ecm_ep_descs hs;
70 68
71 struct usb_ep *notify; 69 struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0f13fd..659b3d9671c4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@ struct f_rndis {
85 u8 ethaddr[ETH_ALEN]; 85 u8 ethaddr[ETH_ALEN];
86 int config; 86 int config;
87 87
88 struct usb_descriptor_header **fs_function;
89 struct rndis_ep_descs fs; 88 struct rndis_ep_descs fs;
90 struct usb_descriptor_header **hs_function;
91 struct rndis_ep_descs hs; 89 struct rndis_ep_descs hs;
92 90
93 struct usb_ep *notify; 91 struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9aaed5..fe5674db344b 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@ struct f_gser {
36 u8 data_id; 36 u8 data_id;
37 u8 port_num; 37 u8 port_num;
38 38
39 struct usb_descriptor_header **fs_function;
40 struct gser_descs fs; 39 struct gser_descs fs;
41 struct usb_descriptor_header **hs_function;
42 struct gser_descs hs; 40 struct gser_descs hs;
43}; 41};
44 42
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a0523f..acb8d233aa1d 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@ struct f_gether {
66 66
67 char ethaddr[14]; 67 char ethaddr[14];
68 68
69 struct usb_descriptor_header **fs_function;
70 struct geth_descs fs; 69 struct geth_descs fs;
71 struct usb_descriptor_header **hs_function;
72 struct geth_descs hs; 70 struct geth_descs hs;
73}; 71};
74 72
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8fef2b2..17d9905101b7 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
11 * Some are available on 2.4 kernels; several are available, but not 11 * Some are available on 2.4 kernels; several are available, but not
12 * yet pushed in the 2.6 mainline tree. 12 * yet pushed in the 2.6 mainline tree.
13 */ 13 */
14
15#ifndef __GADGET_CHIPS_H
16#define __GADGET_CHIPS_H
17
14#ifdef CONFIG_USB_GADGET_NET2280 18#ifdef CONFIG_USB_GADGET_NET2280
15#define gadget_is_net2280(g) !strcmp("net2280", (g)->name) 19#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
16#else 20#else
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
237 /* Everything else is *presumably* fine ... */ 241 /* Everything else is *presumably* fine ... */
238 return true; 242 return true;
239} 243}
244
245#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 376e80c07530..574c53831a05 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -54,6 +54,7 @@
54 54
55#include <mach/dma.h> 55#include <mach/dma.h>
56#include <mach/usb.h> 56#include <mach/usb.h>
57#include <mach/control.h>
57 58
58#include "omap_udc.h" 59#include "omap_udc.h"
59 60
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s)
2310 u32 trans; 2311 u32 trans;
2311 char *ctrl_name; 2312 char *ctrl_name;
2312 2313
2313 tmp = OTG_REV_REG; 2314 tmp = omap_readl(OTG_REV);
2314 if (cpu_is_omap24xx()) { 2315 if (cpu_is_omap24xx()) {
2315 ctrl_name = "control_devconf"; 2316 ctrl_name = "control_devconf";
2316 trans = CONTROL_DEVCONF_REG; 2317 trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
2317 } else { 2318 } else {
2318 ctrl_name = "tranceiver_ctrl"; 2319 ctrl_name = "tranceiver_ctrl";
2319 trans = omap_readw(USB_TRANSCEIVER_CTRL); 2320 trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..53d59287f2bc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
52 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. 52 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
53 */ 53 */
54 54
55#define PREFIX "ttyGS"
56
55/* 57/*
56 * gserial is the lifecycle interface, used by USB functions 58 * gserial is the lifecycle interface, used by USB functions
57 * gs_port is the I/O nexus, used by the tty driver 59 * gs_port is the I/O nexus, used by the tty driver
58 * tty_struct links to the tty/filesystem framework 60 * tty_struct links to the tty/filesystem framework
59 * 61 *
60 * gserial <---> gs_port ... links will be null when the USB link is 62 * gserial <---> gs_port ... links will be null when the USB link is
61 * inactive; managed by gserial_{connect,disconnect}(). 63 * inactive; managed by gserial_{connect,disconnect}(). each gserial
64 * instance can wrap its own USB control protocol.
62 * gserial->ioport == usb_ep->driver_data ... gs_port 65 * gserial->ioport == usb_ep->driver_data ... gs_port
63 * gs_port->port_usb ... gserial 66 * gs_port->port_usb ... gserial
64 * 67 *
@@ -100,6 +103,8 @@ struct gs_port {
100 wait_queue_head_t close_wait; /* wait for last close */ 103 wait_queue_head_t close_wait; /* wait for last close */
101 104
102 struct list_head read_pool; 105 struct list_head read_pool;
106 struct list_head read_queue;
107 unsigned n_read;
103 struct tasklet_struct push; 108 struct tasklet_struct push;
104 109
105 struct list_head write_pool; 110 struct list_head write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
177/* 182/*
178 * gs_buf_data_avail 183 * gs_buf_data_avail
179 * 184 *
180 * Return the number of bytes of data available in the circular 185 * Return the number of bytes of data written into the circular
181 * buffer. 186 * buffer.
182 */ 187 */
183static unsigned gs_buf_data_avail(struct gs_buf *gb) 188static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
278 * Allocate a usb_request and its buffer. Returns a pointer to the 283 * Allocate a usb_request and its buffer. Returns a pointer to the
279 * usb_request or NULL if there is an error. 284 * usb_request or NULL if there is an error.
280 */ 285 */
281static struct usb_request * 286struct usb_request *
282gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) 287gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
283{ 288{
284 struct usb_request *req; 289 struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
302 * 307 *
303 * Free a usb_request and its buffer. 308 * Free a usb_request and its buffer.
304 */ 309 */
305static void gs_free_req(struct usb_ep *ep, struct usb_request *req) 310void gs_free_req(struct usb_ep *ep, struct usb_request *req)
306{ 311{
307 kfree(req->buf); 312 kfree(req->buf);
308 usb_ep_free_request(ep, req); 313 usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
367 req->length = len; 372 req->length = len;
368 list_del(&req->list); 373 list_del(&req->list);
369 374
370#ifdef VERBOSE_DEBUG 375 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
371 pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n", 376 port->port_num, len, *((u8 *)req->buf),
372 __func__, in->name, len, *((u8 *)req->buf),
373 *((u8 *)req->buf+1), *((u8 *)req->buf+2)); 377 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
374#endif
375 378
376 /* Drop lock while we call out of driver; completions 379 /* Drop lock while we call out of driver; completions
377 * could be issued while we do so. Disconnection may 380 * could be issued while we do so. Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
401 return status; 404 return status;
402} 405}
403 406
404static void gs_rx_push(unsigned long _port)
405{
406 struct gs_port *port = (void *)_port;
407 struct tty_struct *tty = port->port_tty;
408
409 /* With low_latency, tty_flip_buffer_push() doesn't put its
410 * real work through a workqueue, so the ldisc has a better
411 * chance to keep up with peak USB data rates.
412 */
413 if (tty) {
414 tty_flip_buffer_push(tty);
415 wake_up_interruptible(&tty->read_wait);
416 }
417}
418
419/*
420 * gs_recv_packet
421 *
422 * Called for each USB packet received. Reads the packet
423 * header and stuffs the data in the appropriate tty buffer.
424 * Returns 0 if successful, or a negative error number.
425 *
426 * Called during USB completion routine, on interrupt time.
427 * With port_lock.
428 */
429static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
430{
431 unsigned len;
432 struct tty_struct *tty;
433
434 /* I/O completions can continue for a while after close(), until the
435 * request queue empties. Just discard any data we receive, until
436 * something reopens this TTY ... as if there were no HW flow control.
437 */
438 tty = port->port_tty;
439 if (tty == NULL) {
440 pr_vdebug("%s: ttyGS%d, after close\n",
441 __func__, port->port_num);
442 return -EIO;
443 }
444
445 len = tty_insert_flip_string(tty, packet, size);
446 if (len > 0)
447 tasklet_schedule(&port->push);
448 if (len < size)
449 pr_debug("%s: ttyGS%d, drop %d bytes\n",
450 __func__, port->port_num, size - len);
451 return 0;
452}
453
454/* 407/*
455 * Context: caller owns port_lock, and port_usb is set 408 * Context: caller owns port_lock, and port_usb is set
456 */ 409 */
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
469 int status; 422 int status;
470 struct tty_struct *tty; 423 struct tty_struct *tty;
471 424
472 /* no more rx if closed or throttled */ 425 /* no more rx if closed */
473 tty = port->port_tty; 426 tty = port->port_tty;
474 if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) 427 if (!tty)
475 break; 428 break;
476 429
477 req = list_entry(pool->next, struct usb_request, list); 430 req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
500 return started; 453 return started;
501} 454}
502 455
503static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) 456/*
457 * RX tasklet takes data out of the RX queue and hands it up to the TTY
458 * layer until it refuses to take any more data (or is throttled back).
459 * Then it issues reads for any further data.
460 *
461 * If the RX queue becomes full enough that no usb_request is queued,
462 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
463 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
464 * can be buffered before the TTY layer's buffers (currently 64 KB).
465 */
466static void gs_rx_push(unsigned long _port)
504{ 467{
505 int status; 468 struct gs_port *port = (void *)_port;
506 struct gs_port *port = ep->driver_data; 469 struct tty_struct *tty;
470 struct list_head *queue = &port->read_queue;
471 bool disconnect = false;
472 bool do_push = false;
507 473
508 spin_lock(&port->port_lock); 474 /* hand any queued data to the tty */
509 list_add(&req->list, &port->read_pool); 475 spin_lock_irq(&port->port_lock);
476 tty = port->port_tty;
477 while (!list_empty(queue)) {
478 struct usb_request *req;
510 479
511 switch (req->status) { 480 req = list_first_entry(queue, struct usb_request, list);
512 case 0:
513 /* normal completion */
514 status = gs_recv_packet(port, req->buf, req->actual);
515 if (status && status != -EIO)
516 pr_debug("%s: %s %s err %d\n",
517 __func__, "recv", ep->name, status);
518 gs_start_rx(port);
519 break;
520 481
521 case -ESHUTDOWN: 482 /* discard data if tty was closed */
522 /* disconnect */ 483 if (!tty)
523 pr_vdebug("%s: %s shutdown\n", __func__, ep->name); 484 goto recycle;
524 break;
525 485
526 default: 486 /* leave data queued if tty was rx throttled */
527 /* presumably a transient fault */ 487 if (test_bit(TTY_THROTTLED, &tty->flags))
528 pr_warning("%s: unexpected %s status %d\n", 488 break;
529 __func__, ep->name, req->status); 489
530 gs_start_rx(port); 490 switch (req->status) {
531 break; 491 case -ESHUTDOWN:
492 disconnect = true;
493 pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
494 break;
495
496 default:
497 /* presumably a transient fault */
498 pr_warning(PREFIX "%d: unexpected RX status %d\n",
499 port->port_num, req->status);
500 /* FALLTHROUGH */
501 case 0:
502 /* normal completion */
503 break;
504 }
505
506 /* push data to (open) tty */
507 if (req->actual) {
508 char *packet = req->buf;
509 unsigned size = req->actual;
510 unsigned n;
511 int count;
512
513 /* we may have pushed part of this packet already... */
514 n = port->n_read;
515 if (n) {
516 packet += n;
517 size -= n;
518 }
519
520 count = tty_insert_flip_string(tty, packet, size);
521 if (count)
522 do_push = true;
523 if (count != size) {
524 /* stop pushing; TTY layer can't handle more */
525 port->n_read += count;
526 pr_vdebug(PREFIX "%d: rx block %d/%d\n",
527 port->port_num,
528 count, req->actual);
529 break;
530 }
531 port->n_read = 0;
532 }
533recycle:
534 list_move(&req->list, &port->read_pool);
532 } 535 }
536
537 /* Push from tty to ldisc; this is immediate with low_latency, and
538 * may trigger callbacks to this driver ... so drop the spinlock.
539 */
540 if (tty && do_push) {
541 spin_unlock_irq(&port->port_lock);
542 tty_flip_buffer_push(tty);
543 wake_up_interruptible(&tty->read_wait);
544 spin_lock_irq(&port->port_lock);
545
546 /* tty may have been closed */
547 tty = port->port_tty;
548 }
549
550
551 /* We want our data queue to become empty ASAP, keeping data
552 * in the tty and ldisc (not here). If we couldn't push any
553 * this time around, there may be trouble unless there's an
554 * implicit tty_unthrottle() call on its way...
555 *
556 * REVISIT we should probably add a timer to keep the tasklet
557 * from starving ... but it's not clear that case ever happens.
558 */
559 if (!list_empty(queue) && tty) {
560 if (!test_bit(TTY_THROTTLED, &tty->flags)) {
561 if (do_push)
562 tasklet_schedule(&port->push);
563 else
564 pr_warning(PREFIX "%d: RX not scheduled?\n",
565 port->port_num);
566 }
567 }
568
569 /* If we're still connected, refill the USB RX queue. */
570 if (!disconnect && port->port_usb)
571 gs_start_rx(port);
572
573 spin_unlock_irq(&port->port_lock);
574}
575
576static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
577{
578 struct gs_port *port = ep->driver_data;
579
580 /* Queue all received data until the tty layer is ready for it. */
581 spin_lock(&port->port_lock);
582 list_add_tail(&req->list, &port->read_queue);
583 tasklet_schedule(&port->push);
533 spin_unlock(&port->port_lock); 584 spin_unlock(&port->port_lock);
534} 585}
535 586
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
625 } 676 }
626 677
627 /* queue read requests */ 678 /* queue read requests */
679 port->n_read = 0;
628 started = gs_start_rx(port); 680 started = gs_start_rx(port);
629 681
630 /* unblock any pending writes into our circular buffer */ 682 /* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
633 } else { 685 } else {
634 gs_free_requests(ep, head); 686 gs_free_requests(ep, head);
635 gs_free_requests(port->port_usb->in, &port->write_pool); 687 gs_free_requests(port->port_usb->in, &port->write_pool);
688 status = -EIO;
636 } 689 }
637 690
638 return started ? 0 : status; 691 return status;
639} 692}
640 693
641/*-------------------------------------------------------------------------*/ 694/*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
736 789
737 /* if connected, start the I/O stream */ 790 /* if connected, start the I/O stream */
738 if (port->port_usb) { 791 if (port->port_usb) {
792 struct gserial *gser = port->port_usb;
793
739 pr_debug("gs_open: start ttyGS%d\n", port->port_num); 794 pr_debug("gs_open: start ttyGS%d\n", port->port_num);
740 gs_start_io(port); 795 gs_start_io(port);
741 796
742 /* REVISIT for ACM, issue "network connected" event */ 797 if (gser->connect)
798 gser->connect(gser);
743 } 799 }
744 800
745 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); 801 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
766static void gs_close(struct tty_struct *tty, struct file *file) 822static void gs_close(struct tty_struct *tty, struct file *file)
767{ 823{
768 struct gs_port *port = tty->driver_data; 824 struct gs_port *port = tty->driver_data;
825 struct gserial *gser;
769 826
770 spin_lock_irq(&port->port_lock); 827 spin_lock_irq(&port->port_lock);
771 828
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
785 port->openclose = true; 842 port->openclose = true;
786 port->open_count = 0; 843 port->open_count = 0;
787 844
788 if (port->port_usb) 845 gser = port->port_usb;
789 /* REVISIT for ACM, issue "network disconnected" event */; 846 if (gser && gser->disconnect)
847 gser->disconnect(gser);
790 848
791 /* wait for circular write buffer to drain, disconnect, or at 849 /* wait for circular write buffer to drain, disconnect, or at
792 * most GS_CLOSE_TIMEOUT seconds; then discard the rest 850 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
793 */ 851 */
794 if (gs_buf_data_avail(&port->port_write_buf) > 0 852 if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
795 && port->port_usb) {
796 spin_unlock_irq(&port->port_lock); 853 spin_unlock_irq(&port->port_lock);
797 wait_event_interruptible_timeout(port->drain_wait, 854 wait_event_interruptible_timeout(port->drain_wait,
798 gs_writes_finished(port), 855 gs_writes_finished(port),
799 GS_CLOSE_TIMEOUT * HZ); 856 GS_CLOSE_TIMEOUT * HZ);
800 spin_lock_irq(&port->port_lock); 857 spin_lock_irq(&port->port_lock);
858 gser = port->port_usb;
801 } 859 }
802 860
803 /* Iff we're disconnected, there can be no I/O in flight so it's 861 /* Iff we're disconnected, there can be no I/O in flight so it's
804 * ok to free the circular buffer; else just scrub it. And don't 862 * ok to free the circular buffer; else just scrub it. And don't
805 * let the push tasklet fire again until we're re-opened. 863 * let the push tasklet fire again until we're re-opened.
806 */ 864 */
807 if (port->port_usb == NULL) 865 if (gser == NULL)
808 gs_buf_free(&port->port_write_buf); 866 gs_buf_free(&port->port_write_buf);
809 else 867 else
810 gs_buf_clear(&port->port_write_buf); 868 gs_buf_clear(&port->port_write_buf);
811 869
812 tasklet_kill(&port->push);
813
814 tty->driver_data = NULL; 870 tty->driver_data = NULL;
815 port->port_tty = NULL; 871 port->port_tty = NULL;
816 872
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
911{ 967{
912 struct gs_port *port = tty->driver_data; 968 struct gs_port *port = tty->driver_data;
913 unsigned long flags; 969 unsigned long flags;
914 unsigned started = 0;
915 970
916 spin_lock_irqsave(&port->port_lock, flags); 971 spin_lock_irqsave(&port->port_lock, flags);
917 if (port->port_usb) 972 if (port->port_usb) {
918 started = gs_start_rx(port); 973 /* Kickstart read queue processing. We don't do xon/xoff,
974 * rts/cts, or other handshaking with the host, but if the
975 * read queue backs up enough we'll be NAKing OUT packets.
976 */
977 tasklet_schedule(&port->push);
978 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
979 }
919 spin_unlock_irqrestore(&port->port_lock, flags); 980 spin_unlock_irqrestore(&port->port_lock, flags);
981}
982
983static int gs_break_ctl(struct tty_struct *tty, int duration)
984{
985 struct gs_port *port = tty->driver_data;
986 int status = 0;
987 struct gserial *gser;
988
989 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
990 port->port_num, duration);
920 991
921 pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n", 992 spin_lock_irq(&port->port_lock);
922 port->port_num, started); 993 gser = port->port_usb;
994 if (gser && gser->send_break)
995 status = gser->send_break(gser, duration);
996 spin_unlock_irq(&port->port_lock);
997
998 return status;
923} 999}
924 1000
925static const struct tty_operations gs_tty_ops = { 1001static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
931 .write_room = gs_write_room, 1007 .write_room = gs_write_room,
932 .chars_in_buffer = gs_chars_in_buffer, 1008 .chars_in_buffer = gs_chars_in_buffer,
933 .unthrottle = gs_unthrottle, 1009 .unthrottle = gs_unthrottle,
1010 .break_ctl = gs_break_ctl,
934}; 1011};
935 1012
936/*-------------------------------------------------------------------------*/ 1013/*-------------------------------------------------------------------------*/
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
953 tasklet_init(&port->push, gs_rx_push, (unsigned long) port); 1030 tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
954 1031
955 INIT_LIST_HEAD(&port->read_pool); 1032 INIT_LIST_HEAD(&port->read_pool);
1033 INIT_LIST_HEAD(&port->read_queue);
956 INIT_LIST_HEAD(&port->write_pool); 1034 INIT_LIST_HEAD(&port->write_pool);
957 1035
958 port->port_num = port_num; 1036 port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
997 1075
998 gs_tty_driver->owner = THIS_MODULE; 1076 gs_tty_driver->owner = THIS_MODULE;
999 gs_tty_driver->driver_name = "g_serial"; 1077 gs_tty_driver->driver_name = "g_serial";
1000 gs_tty_driver->name = "ttyGS"; 1078 gs_tty_driver->name = PREFIX;
1001 /* uses dynamically assigned dev_t values */ 1079 /* uses dynamically assigned dev_t values */
1002 1080
1003 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; 1081 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
1104 ports[i].port = NULL; 1182 ports[i].port = NULL;
1105 mutex_unlock(&ports[i].lock); 1183 mutex_unlock(&ports[i].lock);
1106 1184
1185 tasklet_kill(&port->push);
1186
1107 /* wait for old opens to finish */ 1187 /* wait for old opens to finish */
1108 wait_event(port->close_wait, gs_closed(port)); 1188 wait_event(port->close_wait, gs_closed(port));
1109 1189
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
1175 1255
1176 /* REVISIT if waiting on "carrier detect", signal. */ 1256 /* REVISIT if waiting on "carrier detect", signal. */
1177 1257
1178 /* REVISIT for ACM, issue "network connection" status notification: 1258 /* if it's already open, start I/O ... and notify the serial
1179 * connected if open_count, else disconnected. 1259 * protocol about open/close status (connect/disconnect).
1180 */ 1260 */
1181
1182 /* if it's already open, start I/O */
1183 if (port->open_count) { 1261 if (port->open_count) {
1184 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); 1262 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1185 gs_start_io(port); 1263 gs_start_io(port);
1264 if (gser->connect)
1265 gser->connect(gser);
1266 } else {
1267 if (gser->disconnect)
1268 gser->disconnect(gser);
1186 } 1269 }
1187 1270
1188 spin_unlock_irqrestore(&port->port_lock, flags); 1271 spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
1241 if (port->open_count == 0 && !port->openclose) 1324 if (port->open_count == 0 && !port->openclose)
1242 gs_buf_free(&port->port_write_buf); 1325 gs_buf_free(&port->port_write_buf);
1243 gs_free_requests(gser->out, &port->read_pool); 1326 gs_free_requests(gser->out, &port->read_pool);
1327 gs_free_requests(gser->out, &port->read_queue);
1244 gs_free_requests(gser->in, &port->write_pool); 1328 gs_free_requests(gser->in, &port->write_pool);
1245 spin_unlock_irqrestore(&port->port_lock, flags); 1329 spin_unlock_irqrestore(&port->port_lock, flags);
1246} 1330}
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b561138f90e..af3910d01aea 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
23 * style I/O using the USB peripheral endpoints listed here, including 23 * style I/O using the USB peripheral endpoints listed here, including
24 * hookups to sysfs and /dev for each logical "tty" device. 24 * hookups to sysfs and /dev for each logical "tty" device.
25 * 25 *
26 * REVISIT need TTY --> USB event flow too, so ACM can report open/close 26 * REVISIT at least ACM could support tiocmget() if needed.
27 * as carrier detect events. Model after ECM. There's more ACM state too.
28 * 27 *
29 * REVISIT someday, allow multiplexing several TTYs over these endpoints. 28 * REVISIT someday, allow multiplexing several TTYs over these endpoints.
30 */ 29 */
@@ -41,8 +40,17 @@ struct gserial {
41 40
42 /* REVISIT avoid this CDC-ACM support harder ... */ 41 /* REVISIT avoid this CDC-ACM support harder ... */
43 struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ 42 struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
43
44 /* notification callbacks */
45 void (*connect)(struct gserial *p);
46 void (*disconnect)(struct gserial *p);
47 int (*send_break)(struct gserial *p, int duration);
44}; 48};
45 49
50/* utilities to allocate/free request and buffer */
51struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
52void gs_free_req(struct usb_ep *, struct usb_request *req);
53
46/* port setup/teardown is handled by gadget driver */ 54/* port setup/teardown is handled by gadget driver */
47int gserial_setup(struct usb_gadget *g, unsigned n_ports); 55int gserial_setup(struct usb_gadget *g, unsigned n_ports);
48void gserial_cleanup(void); 56void gserial_cleanup(void);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 5fbdc14e63b3..5416cf969005 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/mbus.h> 14#include <linux/mbus.h>
15#include <asm/plat-orion/ehci-orion.h> 15#include <plat/ehci-orion.h>
16 16
17#define rdl(off) __raw_readl(hcd->regs + (off)) 17#define rdl(off) __raw_readl(hcd->regs + (off))
18#define wrl(off, val) __raw_writel((val), hcd->regs + (off)) 18#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2adb929..d22a84f86a33 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
126 * doesn't quite work because some people have to enforce 32-bit access 126 * doesn't quite work because some people have to enforce 32-bit access
127 */ 127 */
128static void priv_read_copy(struct isp1760_hcd *priv, u32 *src, 128static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
129 __u32 __iomem *dst, u32 offset, u32 len) 129 __u32 __iomem *dst, u32 len)
130{ 130{
131 struct usb_hcd *hcd = priv_to_hcd(priv);
132 u32 val; 131 u32 val;
133 u8 *buff8; 132 u8 *buff8;
134 133
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
136 printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len); 135 printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
137 return; 136 return;
138 } 137 }
139 isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
140 /* XXX
141 * 90nsec delay, the spec says something how this could be avoided.
142 */
143 mdelay(1);
144 138
145 while (len >= 4) { 139 while (len >= 4) {
146 *src = __raw_readl(dst); 140 *src = __raw_readl(dst);
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
987 printk(KERN_ERR "qh is 0\n"); 981 printk(KERN_ERR "qh is 0\n");
988 continue; 982 continue;
989 } 983 }
990 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, 984 isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
991 atl_regs, sizeof(ptd)); 985 HC_MEMORY_REG);
986 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
987 HC_MEMORY_REG);
988 /*
989 * write bank1 address twice to ensure the 90ns delay (time
990 * between BANK0 write and the priv_read_copy() call is at
991 * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns)
992 */
993 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
994 HC_MEMORY_REG);
995
996 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
997 ISP_BANK(0), sizeof(ptd));
992 998
993 dw1 = le32_to_cpu(ptd.dw1); 999 dw1 = le32_to_cpu(ptd.dw1);
994 dw2 = le32_to_cpu(ptd.dw2); 1000 dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1091 case IN_PID: 1097 case IN_PID:
1092 priv_read_copy(priv, 1098 priv_read_copy(priv,
1093 priv->atl_ints[queue_entry].data_buffer, 1099 priv->atl_ints[queue_entry].data_buffer,
1094 usb_hcd->regs + payload, payload, 1100 usb_hcd->regs + payload + ISP_BANK(1),
1095 length); 1101 length);
1096 1102
1097 case OUT_PID: 1103 case OUT_PID:
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
1122 } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) { 1128 } else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
1123 /* short BULK received */ 1129 /* short BULK received */
1124 1130
1125 printk(KERN_ERR "short bulk, %d instead %zu\n", length,
1126 qtd->length);
1127 if (urb->transfer_flags & URB_SHORT_NOT_OK) { 1131 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1128 urb->status = -EREMOTEIO; 1132 urb->status = -EREMOTEIO;
1129 printk(KERN_ERR "not okey\n"); 1133 isp1760_dbg(priv, "short bulk, %d instead %zu "
1134 "with URB_SHORT_NOT_OK flag.\n",
1135 length, qtd->length);
1130 } 1136 }
1131 1137
1132 if (urb->status == -EINPROGRESS) 1138 if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
1206 continue; 1212 continue;
1207 } 1213 }
1208 1214
1209 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs, 1215 isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
1210 int_regs, sizeof(ptd)); 1216 HC_MEMORY_REG);
1217 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
1218 HC_MEMORY_REG);
1219 /*
1220 * write bank1 address twice to ensure the 90ns delay (time
1221 * between BANK0 write and the priv_read_copy() call is at
1222 * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 92ns)
1223 */
1224 isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
1225 HC_MEMORY_REG);
1226
1227 priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
1228 ISP_BANK(0), sizeof(ptd));
1211 dw1 = le32_to_cpu(ptd.dw1); 1229 dw1 = le32_to_cpu(ptd.dw1);
1212 dw3 = le32_to_cpu(ptd.dw3); 1230 dw3 = le32_to_cpu(ptd.dw3);
1213 check_int_err_status(le32_to_cpu(ptd.dw4)); 1231 check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
1242 case IN_PID: 1260 case IN_PID:
1243 priv_read_copy(priv, 1261 priv_read_copy(priv,
1244 priv->int_ints[queue_entry].data_buffer, 1262 priv->int_ints[queue_entry].data_buffer,
1245 usb_hcd->regs + payload , payload, 1263 usb_hcd->regs + payload + ISP_BANK(1),
1246 length); 1264 length);
1247 case OUT_PID: 1265 case OUT_PID:
1248 1266
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1615 return -EPIPE; 1633 return -EPIPE;
1616 } 1634 }
1617 1635
1618 isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe); 1636 return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
1619 return 0;
1620} 1637}
1621 1638
1622static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, 1639static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd86993c..4377277667d9 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void);
54#define BUFFER_MAP 0x7 54#define BUFFER_MAP 0x7
55 55
56#define HC_MEMORY_REG 0x33c 56#define HC_MEMORY_REG 0x33c
57#define ISP_BANK(x) ((x) << 16)
58
57#define HC_PORT1_CTRL 0x374 59#define HC_PORT1_CTRL 0x374
58#define PORT1_POWER (3 << 3) 60#define PORT1_POWER (3 << 3)
59#define PORT1_INIT1 (1 << 7) 61#define PORT1_INIT1 (1 << 7)
@@ -119,6 +121,9 @@ struct inter_packet_info {
119typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh, 121typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
120 struct isp1760_qtd *qtd); 122 struct isp1760_qtd *qtd);
121 123
124#define isp1760_dbg(priv, fmt, args...) \
125 dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
126
122#define isp1760_info(priv, fmt, args...) \ 127#define isp1760_info(priv, fmt, args...) \
123 dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args) 128 dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
124 129
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc47941d01..89901962cbfd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd);
86static int ohci_restart (struct ohci_hcd *ohci); 86static int ohci_restart (struct ohci_hcd *ohci);
87#endif 87#endif
88 88
89#ifdef CONFIG_PCI
90static void quirk_amd_pll(int state);
91static void amd_iso_dev_put(void);
92#else
93static inline void quirk_amd_pll(int state)
94{
95 return;
96}
97static inline void amd_iso_dev_put(void)
98{
99 return;
100}
101#endif
102
103
89#include "ohci-hub.c" 104#include "ohci-hub.c"
90#include "ohci-dbg.c" 105#include "ohci-dbg.c"
91#include "ohci-mem.c" 106#include "ohci-mem.c"
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci)
483 int ret; 498 int ret;
484 struct usb_hcd *hcd = ohci_to_hcd(ohci); 499 struct usb_hcd *hcd = ohci_to_hcd(ohci);
485 500
501 if (distrust_firmware)
502 ohci->flags |= OHCI_QUIRK_HUB_POWER;
503
486 disable (ohci); 504 disable (ohci);
487 ohci->regs = hcd->regs; 505 ohci->regs = hcd->regs;
488 506
@@ -689,7 +707,8 @@ retry:
689 temp |= RH_A_NOCP; 707 temp |= RH_A_NOCP;
690 temp &= ~(RH_A_POTPGT | RH_A_NPS); 708 temp &= ~(RH_A_POTPGT | RH_A_NPS);
691 ohci_writel (ohci, temp, &ohci->regs->roothub.a); 709 ohci_writel (ohci, temp, &ohci->regs->roothub.a);
692 } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) { 710 } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
711 (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
693 /* hub power always on; required for AMD-756 and some 712 /* hub power always on; required for AMD-756 and some
694 * Mac platforms. ganged overcurrent reporting, if any. 713 * Mac platforms. ganged overcurrent reporting, if any.
695 */ 714 */
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
882 901
883 if (quirk_zfmicro(ohci)) 902 if (quirk_zfmicro(ohci))
884 del_timer(&ohci->unlink_watchdog); 903 del_timer(&ohci->unlink_watchdog);
904 if (quirk_amdiso(ohci))
905 amd_iso_dev_put();
885 906
886 remove_debug_files (ohci); 907 remove_debug_files (ohci);
887 ohci_mem_cleanup (ohci); 908 ohci_mem_cleanup (ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b56739221d11..439beb784f3e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -483,6 +483,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
483 length++; 483 length++;
484 } 484 }
485 485
486 /* Some broken controllers never turn off RHCS in the interrupt
487 * status register. For their sake we won't re-enable RHSC
488 * interrupts if the flag is already set.
489 */
490 if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
491 changed = 1;
492
486 /* look at each port */ 493 /* look at each port */
487 for (i = 0; i < ohci->num_ports; i++) { 494 for (i = 0; i < ohci->num_ports; i++) {
488 u32 status = roothub_portstatus (ohci, i); 495 u32 status = roothub_portstatus (ohci, i);
@@ -572,8 +579,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
572 return 0; 579 return 0;
573} 580}
574 581
575static void start_hnp(struct ohci_hcd *ohci);
576
577#else 582#else
578 583
579#define ohci_start_port_reset NULL 584#define ohci_start_port_reset NULL
@@ -760,7 +765,7 @@ static int ohci_hub_control (
760#ifdef CONFIG_USB_OTG 765#ifdef CONFIG_USB_OTG
761 if (hcd->self.otg_port == (wIndex + 1) 766 if (hcd->self.otg_port == (wIndex + 1)
762 && hcd->self.b_hnp_enable) 767 && hcd->self.b_hnp_enable)
763 start_hnp(ohci); 768 ohci->start_hnp(ohci);
764 else 769 else
765#endif 770#endif
766 ohci_writel (ohci, RH_PS_PSS, 771 ohci_writel (ohci, RH_PS_PSS,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 94dfca02f7e1..3d532b709670 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
225 dev_err(hcd->self.controller, "can't find transceiver\n"); 225 dev_err(hcd->self.controller, "can't find transceiver\n");
226 return -ENODEV; 226 return -ENODEV;
227 } 227 }
228 ohci->start_hnp = start_hnp;
228 } 229 }
229#endif 230#endif
230 231
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
260 omap_cfg_reg(W4_USB_HIGHZ); 261 omap_cfg_reg(W4_USB_HIGHZ);
261 } 262 }
262 ohci_writel(ohci, rh, &ohci->regs->roothub.a); 263 ohci_writel(ohci, rh, &ohci->regs->roothub.a);
263 distrust_firmware = 0; 264 ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
264 } else if (machine_is_nokia770()) { 265 } else if (machine_is_nokia770()) {
265 /* We require a self-powered hub, which should have 266 /* We require a self-powered hub, which should have
266 * plenty of power. */ 267 * plenty of power. */
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc912e16..083e8df0a817 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
18#error "This file is PCI bus glue. CONFIG_PCI must be defined." 18#error "This file is PCI bus glue. CONFIG_PCI must be defined."
19#endif 19#endif
20 20
21#include <linux/pci.h>
22#include <linux/io.h>
23
24
25/* constants used to work around PM-related transfer
26 * glitches in some AMD 700 series southbridges
27 */
28#define AB_REG_BAR 0xf0
29#define AB_INDX(addr) ((addr) + 0x00)
30#define AB_DATA(addr) ((addr) + 0x04)
31#define AX_INDXC 0X30
32#define AX_DATAC 0x34
33
34#define NB_PCIE_INDX_ADDR 0xe0
35#define NB_PCIE_INDX_DATA 0xe4
36#define PCIE_P_CNTL 0x10040
37#define BIF_NB 0x10002
38
39static struct pci_dev *amd_smbus_dev;
40static struct pci_dev *amd_hb_dev;
41static int amd_ohci_iso_count;
42
21/*-------------------------------------------------------------------------*/ 43/*-------------------------------------------------------------------------*/
22 44
23static int broken_suspend(struct usb_hcd *hcd) 45static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
143 return 0; 165 return 0;
144} 166}
145 167
168static int ohci_quirk_amd700(struct usb_hcd *hcd)
169{
170 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
171 u8 rev = 0;
172
173 if (!amd_smbus_dev)
174 amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
175 PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
176 if (!amd_smbus_dev)
177 return 0;
178
179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
180 if ((rev > 0x3b) || (rev < 0x30)) {
181 pci_dev_put(amd_smbus_dev);
182 amd_smbus_dev = NULL;
183 return 0;
184 }
185
186 amd_ohci_iso_count++;
187
188 if (!amd_hb_dev)
189 amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
190
191 ohci->flags |= OHCI_QUIRK_AMD_ISO;
192 ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
193
194 return 0;
195}
196
197/*
198 * The hardware normally enables the A-link power management feature, which
199 * lets the system lower the power consumption in idle states.
200 *
201 * Assume the system is configured to have USB 1.1 ISO transfers going
202 * to or from a USB device. Without this quirk, that stream may stutter
203 * or have breaks occasionally. For transfers going to speakers, this
204 * makes a very audible mess...
205 *
206 * That audio playback corruption is due to the audio stream getting
207 * interrupted occasionally when the link goes in lower power state
208 * This USB quirk prevents the link going into that lower power state
209 * during audio playback or other ISO operations.
210 */
211static void quirk_amd_pll(int on)
212{
213 u32 addr;
214 u32 val;
215 u32 bit = (on > 0) ? 1 : 0;
216
217 pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
218
219 /* BIT names/meanings are NDA-protected, sorry ... */
220
221 outl(AX_INDXC, AB_INDX(addr));
222 outl(0x40, AB_DATA(addr));
223 outl(AX_DATAC, AB_INDX(addr));
224 val = inl(AB_DATA(addr));
225 val &= ~((1 << 3) | (1 << 4) | (1 << 9));
226 val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
227 outl(val, AB_DATA(addr));
228
229 if (amd_hb_dev) {
230 addr = PCIE_P_CNTL;
231 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
232
233 pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
234 val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
235 val |= bit | (bit << 3) | (bit << 12);
236 val |= ((!bit) << 4) | ((!bit) << 9);
237 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
238
239 addr = BIF_NB;
240 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
241
242 pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
243 val &= ~(1 << 8);
244 val |= bit << 8;
245 pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
246 }
247}
248
249static void amd_iso_dev_put(void)
250{
251 amd_ohci_iso_count--;
252 if (amd_ohci_iso_count == 0) {
253 if (amd_smbus_dev) {
254 pci_dev_put(amd_smbus_dev);
255 amd_smbus_dev = NULL;
256 }
257 if (amd_hb_dev) {
258 pci_dev_put(amd_hb_dev);
259 amd_hb_dev = NULL;
260 }
261 }
262
263}
264
146/* List of quirks for OHCI */ 265/* List of quirks for OHCI */
147static const struct pci_device_id ohci_pci_quirks[] = { 266static const struct pci_device_id ohci_pci_quirks[] = {
148 { 267 {
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = {
181 PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152), 300 PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
182 .driver_data = (unsigned long) broken_suspend, 301 .driver_data = (unsigned long) broken_suspend,
183 }, 302 },
303 {
304 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
305 .driver_data = (unsigned long)ohci_quirk_amd700,
306 },
307 {
308 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
309 .driver_data = (unsigned long)ohci_quirk_amd700,
310 },
311 {
312 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
313 .driver_data = (unsigned long)ohci_quirk_amd700,
314 },
315
184 /* FIXME for some of the early AMD 760 southbridges, OHCI 316 /* FIXME for some of the early AMD 760 southbridges, OHCI
185 * won't work at all. blacklist them. 317 * won't work at all. blacklist them.
186 */ 318 */
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c557953..c2d80f80448b 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@ __acquires(ohci->lock)
49 switch (usb_pipetype (urb->pipe)) { 49 switch (usb_pipetype (urb->pipe)) {
50 case PIPE_ISOCHRONOUS: 50 case PIPE_ISOCHRONOUS:
51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; 51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
53 && quirk_amdiso(ohci))
54 quirk_amd_pll(1);
52 break; 55 break;
53 case PIPE_INTERRUPT: 56 case PIPE_INTERRUPT:
54 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; 57 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@ static void td_submit_urb (
677 data + urb->iso_frame_desc [cnt].offset, 680 data + urb->iso_frame_desc [cnt].offset,
678 urb->iso_frame_desc [cnt].length, urb, cnt); 681 urb->iso_frame_desc [cnt].length, urb, cnt);
679 } 682 }
683 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
684 && quirk_amdiso(ohci))
685 quirk_amd_pll(0);
680 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 686 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
681 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; 687 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
682 break; 688 break;
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544ddc7849..faf622eafce7 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@ struct ohci_hcd {
371 * other external transceivers should be software-transparent 371 * other external transceivers should be software-transparent
372 */ 372 */
373 struct otg_transceiver *transceiver; 373 struct otg_transceiver *transceiver;
374 void (*start_hnp)(struct ohci_hcd *ohci);
374 375
375 /* 376 /*
376 * memory management for queue data structures 377 * memory management for queue data structures
@@ -399,6 +400,8 @@ struct ohci_hcd {
399#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/ 400#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
400#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */ 401#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
401#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ 402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
402 // there are also chip quirks/bugs in init logic 405 // there are also chip quirks/bugs in init logic
403 406
404 struct work_struct nec_work; /* Worker for NEC quirk */ 407 struct work_struct nec_work; /* Worker for NEC quirk */
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
426{ 429{
427 return ohci->flags & OHCI_QUIRK_ZFMICRO; 430 return ohci->flags & OHCI_QUIRK_ZFMICRO;
428} 431}
432static inline int quirk_amdiso(struct ohci_hcd *ohci)
433{
434 return ohci->flags & OHCI_QUIRK_AMD_ISO;
435}
429#else 436#else
430static inline int quirk_nec(struct ohci_hcd *ohci) 437static inline int quirk_nec(struct ohci_hcd *ohci)
431{ 438{
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
435{ 442{
436 return 0; 443 return 0;
437} 444}
445static inline int quirk_amdiso(struct ohci_hcd *ohci)
446{
447 return 0;
448}
438#endif 449#endif
439 450
440/* convert between an hcd pointer and the corresponding ohci_hcd */ 451/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dddb120..ea7126f99cab 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
964 disable_irq_nrdy(r8a66597, pipenum); 964 disable_irq_nrdy(r8a66597, pipenum);
965} 965}
966 966
967static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
968{
969 mod_timer(&r8a66597->rh_timer,
970 jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
971}
972
973static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
974 int connect)
975{
976 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
977
978 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
979 rh->scount = R8A66597_MAX_SAMPLING;
980 if (connect)
981 rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
982 else
983 rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
984 rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
985
986 r8a66597_root_hub_start_polling(r8a66597);
987}
988
967/* this function must be called with interrupt disabled */ 989/* this function must be called with interrupt disabled */
968static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, 990static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
969 u16 syssts) 991 u16 syssts)
970{ 992{
971 if (syssts == SE0) { 993 if (syssts == SE0) {
994 r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
972 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 995 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
973 return; 996 return;
974 } 997 }
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
1002{ 1025{
1003 struct r8a66597_device *dev = r8a66597->root_hub[port].dev; 1026 struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
1004 1027
1005 r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
1006 r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
1007
1008 disable_r8a66597_pipe_all(r8a66597, dev); 1028 disable_r8a66597_pipe_all(r8a66597, dev);
1009 free_usb_address(r8a66597, dev); 1029 free_usb_address(r8a66597, dev);
1010 1030
1011 r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); 1031 start_root_hub_sampling(r8a66597, port, 0);
1012} 1032}
1013 1033
1014/* this function must be called with interrupt disabled */ 1034/* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
1551 } 1571 }
1552} 1572}
1553 1573
1554static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
1555{
1556 mod_timer(&r8a66597->rh_timer,
1557 jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
1558}
1559
1560static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
1561{
1562 struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
1563
1564 rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
1565 rh->scount = R8A66597_MAX_SAMPLING;
1566 r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
1567 | (1 << USB_PORT_FEAT_C_CONNECTION);
1568 r8a66597_root_hub_start_polling(r8a66597);
1569}
1570
1571static irqreturn_t r8a66597_irq(struct usb_hcd *hcd) 1574static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1572{ 1575{
1573 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd); 1576 struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1594 r8a66597_bclr(r8a66597, ATTCHE, INTENB2); 1597 r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
1595 1598
1596 /* start usb bus sampling */ 1599 /* start usb bus sampling */
1597 start_root_hub_sampling(r8a66597, 1); 1600 start_root_hub_sampling(r8a66597, 1, 1);
1598 } 1601 }
1599 if (mask2 & DTCH) { 1602 if (mask2 & DTCH) {
1600 r8a66597_write(r8a66597, ~DTCH, INTSTS2); 1603 r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
1609 r8a66597_bclr(r8a66597, ATTCHE, INTENB1); 1612 r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
1610 1613
1611 /* start usb bus sampling */ 1614 /* start usb bus sampling */
1612 start_root_hub_sampling(r8a66597, 0); 1615 start_root_hub_sampling(r8a66597, 0, 1);
1613 } 1616 }
1614 if (mask1 & DTCH) { 1617 if (mask1 & DTCH) {
1615 r8a66597_write(r8a66597, ~DTCH, INTSTS1); 1618 r8a66597_write(r8a66597, ~DTCH, INTSTS1);
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c9a11a..4ea50e0abcbb 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@ config USB_ADUTUX
42 To compile this driver as a module, choose M here. The module 42 To compile this driver as a module, choose M here. The module
43 will be called adutux. 43 will be called adutux.
44 44
45config USB_AUERSWALD
46 tristate "USB Auerswald ISDN support"
47 depends on USB
48 help
49 Say Y here if you want to connect an Auerswald USB ISDN Device
50 to your computer's USB port.
51
52 To compile this driver as a module, choose M here: the
53 module will be called auerswald.
54
55config USB_RIO500 45config USB_RIO500
56 tristate "USB Diamond Rio500 support" 46 tristate "USB Diamond Rio500 support"
57 depends on USB 47 depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091cb5ec0..45b4e12afb08 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
5 5
6obj-$(CONFIG_USB_ADUTUX) += adutux.o 6obj-$(CONFIG_USB_ADUTUX) += adutux.o
7obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o 7obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
8obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
9obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o 8obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
10obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o 9obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
11obj-$(CONFIG_USB_CYTHERM) += cytherm.o 10obj-$(CONFIG_USB_CYTHERM) += cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5510e7..000000000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
1/*****************************************************************************/
2/*
3 * auerswald.c -- Auerswald PBX/System Telephone usb driver.
4 *
5 * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de)
6 *
7 * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
8 * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24 /*****************************************************************************/
25
26/* Standard Linux module include files */
27#include <asm/uaccess.h>
28#include <asm/byteorder.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/wait.h>
33#include <linux/usb.h>
34#include <linux/mutex.h>
35
36/*-------------------------------------------------------------------*/
37/* Debug support */
38#ifdef DEBUG
39#define dump( adr, len) \
40do { \
41 unsigned int u; \
42 printk (KERN_DEBUG); \
43 for (u = 0; u < len; u++) \
44 printk (" %02X", adr[u] & 0xFF); \
45 printk ("\n"); \
46} while (0)
47#else
48#define dump( adr, len)
49#endif
50
51/*-------------------------------------------------------------------*/
52/* Version Information */
53#define DRIVER_VERSION "0.9.11"
54#define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>"
55#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
56
57/*-------------------------------------------------------------------*/
58/* Private declarations for Auerswald USB driver */
59
60/* Auerswald Vendor ID */
61#define ID_AUERSWALD 0x09BF
62
63#define AUER_MINOR_BASE 112 /* auerswald driver minor number */
64
65/* we can have up to this number of device plugged in at once */
66#define AUER_MAX_DEVICES 16
67
68
69/* Number of read buffers for each device */
70#define AU_RBUFFERS 10
71
72/* Number of chain elements for each control chain */
73#define AUCH_ELEMENTS 20
74
75/* Number of retries in communication */
76#define AU_RETRIES 10
77
78/*-------------------------------------------------------------------*/
79/* vendor specific protocol */
80/* Header Byte */
81#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
82#define AUH_DIRECT 0x00 /* data is for USB device */
83#define AUH_INDIRECT 0x80 /* USB device is relay */
84
85#define AUH_SPLITMASK 0x40 /* mask for split bit */
86#define AUH_UNSPLIT 0x00 /* data block is full-size */
87#define AUH_SPLIT 0x40 /* data block is part of a larger one,
88 split-byte follows */
89
90#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
91#define AUH_TYPESIZE 0x40 /* different types */
92#define AUH_DCHANNEL 0x00 /* D channel data */
93#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
94#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
95/* 0x03..0x0F reserved for driver internal use */
96#define AUH_COMMAND 0x10 /* Command channel */
97#define AUH_BPROT 0x11 /* Configuration block protocol */
98#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
99#define AUH_TAPI 0x13 /* telephone api data (ATD) */
100/* 0x14..0x3F reserved for other protocols */
101#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
102#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
103
104#define AUH_SIZE 1 /* Size of Header Byte */
105
106/* Split Byte. Only present if split bit in header byte set.*/
107#define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */
108#define AUS_FIRST 0x80 /* first block */
109#define AUS_FOLLOW 0x00 /* following block */
110
111#define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */
112#define AUS_END 0x40 /* last block */
113#define AUS_NOEND 0x00 /* not the last block */
114
115#define AUS_LENMASK 0x3F /* mask for block length information */
116
117/* Request types */
118#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
119#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
120
121/* Vendor Requests */
122#define AUV_GETINFO 0x00 /* GetDeviceInfo */
123#define AUV_WBLOCK 0x01 /* Write Block */
124#define AUV_RBLOCK 0x02 /* Read Block */
125#define AUV_CHANNELCTL 0x03 /* Channel Control */
126#define AUV_DUMMY 0x04 /* Dummy Out for retry */
127
128/* Device Info Types */
129#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
130#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
131#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
132
133/* Interrupt endpoint definitions */
134#define AU_IRQENDP 1 /* Endpoint number */
135#define AU_IRQCMDID 16 /* Command-block ID */
136#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
137#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
138
139/* Device String Descriptors */
140#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
141#define AUSI_DEVICE 2 /* Name of the Device */
142#define AUSI_SERIALNR 3 /* Serial Number */
143#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
144
145#define AUSI_DLEN 100 /* Max. Length of Device Description */
146
147#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
148
149/*-------------------------------------------------------------------*/
150/* External data structures / Interface */
151typedef struct
152{
153 char __user *buf; /* return buffer for string contents */
154 unsigned int bsize; /* size of return buffer */
155} audevinfo_t,*paudevinfo_t;
156
157/* IO controls */
158#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
159#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
160#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
161#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
162#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
163#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
164#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmitt channel ready to send */
165/* 'U' 0xF7..0xFF reseved */
166
167/*-------------------------------------------------------------------*/
168/* Internal data structures */
169
170/* ..................................................................*/
171/* urb chain element */
172struct auerchain; /* forward for circular reference */
173typedef struct
174{
175 struct auerchain *chain; /* pointer to the chain to which this element belongs */
176 struct urb * urbp; /* pointer to attached urb */
177 void *context; /* saved URB context */
178 usb_complete_t complete; /* saved URB completion function */
179 struct list_head list; /* to include element into a list */
180} auerchainelement_t,*pauerchainelement_t;
181
182/* urb chain */
183typedef struct auerchain
184{
185 pauerchainelement_t active; /* element which is submitted to urb */
186 spinlock_t lock; /* protection agains interrupts */
187 struct list_head waiting_list; /* list of waiting elements */
188 struct list_head free_list; /* list of available elements */
189} auerchain_t,*pauerchain_t;
190
191/* urb blocking completion helper struct */
192typedef struct
193{
194 wait_queue_head_t wqh; /* wait for completion */
195 unsigned int done; /* completion flag */
196} auerchain_chs_t,*pauerchain_chs_t;
197
198/* ...................................................................*/
199/* buffer element */
200struct auerbufctl; /* forward */
201typedef struct
202{
203 char *bufp; /* reference to allocated data buffer */
204 unsigned int len; /* number of characters in data buffer */
205 unsigned int retries; /* for urb retries */
206 struct usb_ctrlrequest *dr; /* for setup data in control messages */
207 struct urb * urbp; /* USB urb */
208 struct auerbufctl *list; /* pointer to list */
209 struct list_head buff_list; /* reference to next buffer in list */
210} auerbuf_t,*pauerbuf_t;
211
212/* buffer list control block */
213typedef struct auerbufctl
214{
215 spinlock_t lock; /* protection in interrupt */
216 struct list_head free_buff_list;/* free buffers */
217 struct list_head rec_buff_list; /* buffers with receive data */
218} auerbufctl_t,*pauerbufctl_t;
219
220/* ...................................................................*/
221/* service context */
222struct auerscon; /* forward */
223typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
224typedef void (*auer_disconn_t) (struct auerscon*);
225typedef struct auerscon
226{
227 unsigned int id; /* protocol service id AUH_xxxx */
228 auer_dispatch_t dispatch; /* dispatch read buffer */
229 auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
230} auerscon_t,*pauerscon_t;
231
232/* ...................................................................*/
233/* USB device context */
234typedef struct
235{
236 struct mutex mutex; /* protection in user context */
237 char name[20]; /* name of the /dev/usb entry */
238 unsigned int dtindex; /* index in the device table */
239 struct usb_device * usbdev; /* USB device handle */
240 int open_count; /* count the number of open character channels */
241 char dev_desc[AUSI_DLEN];/* for storing a textual description */
242 unsigned int maxControlLength; /* max. Length of control paket (without header) */
243 struct urb * inturbp; /* interrupt urb */
244 char * intbufp; /* data buffer for interrupt urb */
245 unsigned int irqsize; /* size of interrupt endpoint 1 */
246 struct auerchain controlchain; /* for chaining of control messages */
247 auerbufctl_t bufctl; /* Buffer control for control transfers */
248 pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
249 unsigned int version; /* Version of the device */
250 wait_queue_head_t bufferwait; /* wait for a control buffer */
251} auerswald_t,*pauerswald_t;
252
253/* ................................................................... */
254/* character device context */
255typedef struct
256{
257 struct mutex mutex; /* protection in user context */
258 pauerswald_t auerdev; /* context pointer of assigned device */
259 auerbufctl_t bufctl; /* controls the buffer chain */
260 auerscon_t scontext; /* service context */
261 wait_queue_head_t readwait; /* for synchronous reading */
262 struct mutex readmutex; /* protection against multiple reads */
263 pauerbuf_t readbuf; /* buffer held for partial reading */
264 unsigned int readoffset; /* current offset in readbuf */
265 unsigned int removed; /* is != 0 if device is removed */
266} auerchar_t,*pauerchar_t;
267
268
269/*-------------------------------------------------------------------*/
270/* Forwards */
271static void auerswald_ctrlread_complete (struct urb * urb);
272static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
273static struct usb_driver auerswald_driver;
274
275
276/*-------------------------------------------------------------------*/
277/* USB chain helper functions */
278/* -------------------------- */
279
280/* completion function for chained urbs */
/* Completion callback installed on every urb that runs through a chain.
   Restores the caller's original context/complete fields, invokes the
   original completion, returns the chain element to the free list and,
   if another urb is waiting, submits it with GFP_ATOMIC. A failed
   submit is reported by calling this function again (recursively). */
static void auerchain_complete (struct urb * urb)
{
	unsigned long flags;
	int result;

	/* get pointer to element and to chain */
	pauerchainelement_t acep = urb->context;
	pauerchain_t acp = acep->chain;

	/* restore original entries in urb */
	urb->context = acep->context;
	urb->complete = acep->complete;

	dbg ("auerchain_complete called");

	/* call original completion function
	   NOTE: this function may lead to more urbs submitted into the chain.
	   (no chain lock at calling complete()!)
	   acp->active != NULL is protecting us against recursion.*/
	urb->complete (urb);

	/* detach element from chain data structure */
	spin_lock_irqsave (&acp->lock, flags);
	if (acp->active != acep) /* paranoia debug check */
		dbg ("auerchain_complete: completion on non-active element called!");
	else
		acp->active = NULL;

	/* add the used chain element to the list of free elements */
	list_add_tail (&acep->list, &acp->free_list);
	acep = NULL;

	/* is there a new element waiting in the chain? */
	if (!acp->active && !list_empty (&acp->waiting_list)) {
		/* yes: get the entry */
		struct list_head *tmp = acp->waiting_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
		acp->active = acep;
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* submit the new urb */
	if (acep) {
		urb = acep->urbp;
		dbg ("auerchain_complete: submitting next urb from chain");
		urb->status = 0;	/* needed! */
		result = usb_submit_urb(urb, GFP_ATOMIC);

		/* check for submit errors */
		if (result) {
			urb->status = result;
			dbg("auerchain_complete: usb_submit_urb with error code %d", result);
			/* and do error handling via *this* completion function (recursive) */
			auerchain_complete( urb);
		}
	} else {
		/* simple return without submitting a new urb.
		   The empty chain is detected with acp->active == NULL. */
	};
}
342
343
344/* submit function for chained urbs
345 this function may be called from completion context or from user space!
346 early = 1 -> submit in front of chain
347*/
/* Queue @urb on chain @acp, or start it immediately when the chain is
   idle. @early == 1 inserts at the head of a busy chain's waiting
   list, @early == 0 at the tail.
   May be called from completion context or from user space.
   Returns 0 on success (a failing usb_submit_urb is reported through
   the urb's completion instead), or -ENOMEM if no chain element is
   free. */
static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
{
	int result;
	unsigned long flags;
	pauerchainelement_t acep = NULL;

	dbg ("auerchain_submit_urb called");

	/* try to get a chain element */
	spin_lock_irqsave (&acp->lock, flags);
	if (!list_empty (&acp->free_list)) {
		/* yes: get the entry */
		struct list_head *tmp = acp->free_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* if no chain element available: return with error */
	if (!acep) {
		return -ENOMEM;
	}

	/* fill in the new chain element values */
	acep->chain = acp;
	acep->context = urb->context;
	acep->complete = urb->complete;
	acep->urbp = urb;
	INIT_LIST_HEAD (&acep->list);

	/* modify urb so our completion runs first */
	urb->context = acep;
	urb->complete = auerchain_complete;
	urb->status = -EINPROGRESS;	/* usb_submit_urb does this, too */

	/* add element to chain - or start it immediately */
	spin_lock_irqsave (&acp->lock, flags);
	if (acp->active) {
		/* there is traffic in the chain, simple add element to chain */
		if (early) {
			dbg ("adding new urb to head of chain");
			list_add (&acep->list, &acp->waiting_list);
		} else {
			dbg ("adding new urb to end of chain");
			list_add_tail (&acep->list, &acp->waiting_list);
		}
		acep = NULL;
	} else {
		/* the chain is empty. Prepare restart */
		acp->active = acep;
	}
	/* Spin has to be removed before usb_submit_urb! */
	spin_unlock_irqrestore (&acp->lock, flags);

	/* Submit urb if immediate restart */
	if (acep) {
		dbg("submitting urb immediate");
		urb->status = 0;	/* needed! */
		result = usb_submit_urb(urb, GFP_ATOMIC);
		/* check for submit errors */
		if (result) {
			urb->status = result;
			dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
			/* and do error handling via completion function */
			auerchain_complete( urb);
		}
	}

	return 0;
}
418
419/* submit function for chained urbs
420 this function may be called from completion context or from user space!
421*/
/* Queue @urb at the tail of chain @acp (convenience wrapper around
   auerchain_submit_urb_list() with early == 0).
   May be called from completion context or from user space. */
static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
{
	return auerchain_submit_urb_list (acp, urb, 0);
}
426
427/* cancel an urb which is submitted to the chain
428 the result is 0 if the urb is cancelled, or -EINPROGRESS if
429 the function is successfully started.
430*/
/* Cancel an urb that was submitted to chain @acp.
   If the urb is still waiting it is removed and completed by hand
   with -ENOENT (returns 0). If it is the active urb, the result of
   usb_unlink_urb() is returned. An urb not found in the chain counts
   as success (returns 0). */
static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
{
	unsigned long flags;
	struct urb * urbp;
	pauerchainelement_t acep;
	struct list_head *tmp;

	dbg ("auerchain_unlink_urb called");

	/* search the chain of waiting elements */
	spin_lock_irqsave (&acp->lock, flags);
	list_for_each (tmp, &acp->waiting_list) {
		acep = list_entry (tmp, auerchainelement_t, list);
		if (acep->urbp == urb) {
			list_del (tmp);
			urb->context = acep->context;
			urb->complete = acep->complete;
			list_add_tail (&acep->list, &acp->free_list);
			spin_unlock_irqrestore (&acp->lock, flags);
			dbg ("unlink waiting urb");
			/* complete by hand; the urb never reached the HC */
			urb->status = -ENOENT;
			urb->complete (urb);
			return 0;
		}
	}
	/* not found. */
	spin_unlock_irqrestore (&acp->lock, flags);

	/* get the active urb */
	acep = acp->active;
	if (acep) {
		urbp = acep->urbp;

		/* check if we have to cancel the active urb */
		if (urbp == urb) {
			/* note that there is a race condition between the check above
			   and the unlink() call because of no lock. This race is harmless,
			   because the usb module will detect the unlink() after completion.
			   We can't use the acp->lock here because the completion function
			   wants to grab it.
			*/
			dbg ("unlink active urb");
			return usb_unlink_urb (urbp);
		}
	}

	/* not found anyway
	   ... is some kind of success
	*/
	dbg ("urb to unlink not found in chain");
	return 0;
}
483
484/* cancel all urbs which are in the chain.
485 this function must not be called from interrupt or completion handler.
486*/
/* Cancel every urb in chain @acp: complete all waiting urbs by hand
   with -ENOENT, then synchronously kill the active one.
   Must not be called from interrupt or completion context (usb_kill_urb
   blocks). */
static void auerchain_unlink_all (pauerchain_t acp)
{
	unsigned long flags;
	struct urb * urbp;
	pauerchainelement_t acep;

	dbg ("auerchain_unlink_all called");

	/* clear the chain of waiting elements */
	spin_lock_irqsave (&acp->lock, flags);
	while (!list_empty (&acp->waiting_list)) {
		/* get the next entry */
		struct list_head *tmp = acp->waiting_list.next;
		list_del (tmp);
		acep = list_entry (tmp, auerchainelement_t, list);
		urbp = acep->urbp;
		urbp->context = acep->context;
		urbp->complete = acep->complete;
		list_add_tail (&acep->list, &acp->free_list);
		/* drop the lock while invoking the completion by hand */
		spin_unlock_irqrestore (&acp->lock, flags);
		dbg ("unlink waiting urb");
		urbp->status = -ENOENT;
		urbp->complete (urbp);
		spin_lock_irqsave (&acp->lock, flags);
	}
	spin_unlock_irqrestore (&acp->lock, flags);

	/* clear the active urb */
	acep = acp->active;
	if (acep) {
		urbp = acep->urbp;
		dbg ("unlink active urb");
		usb_kill_urb (urbp);	/* blocks until the urb is finished */
	}
}
522
523
524/* free the chain.
525 this function must not be called from interrupt or completion handler.
526*/
/* Free all chain elements of @acp after cancelling any pending urbs.
   Must not be called from interrupt or completion context. */
static void auerchain_free (pauerchain_t acp)
{
	unsigned long flags;
	pauerchainelement_t acep;

	dbg ("auerchain_free called");

	/* first, cancel all pending urbs */
	auerchain_unlink_all (acp);

	/* free the elements */
	spin_lock_irqsave (&acp->lock, flags);
	while (!list_empty (&acp->free_list)) {
		/* get the next entry */
		struct list_head *tmp = acp->free_list.next;
		list_del (tmp);
		/* NOTE(review): the lock is dropped around kfree(), presumably
		   to keep the critical section minimal; the element is already
		   off the list so this is safe */
		spin_unlock_irqrestore (&acp->lock, flags);
		acep = list_entry (tmp, auerchainelement_t, list);
		kfree (acep);
		spin_lock_irqsave (&acp->lock, flags);
	}
	spin_unlock_irqrestore (&acp->lock, flags);
}
550
551
552/* Init the chain control structure */
553static void auerchain_init (pauerchain_t acp)
554{
555 /* init the chain data structure */
556 acp->active = NULL;
557 spin_lock_init (&acp->lock);
558 INIT_LIST_HEAD (&acp->waiting_list);
559 INIT_LIST_HEAD (&acp->free_list);
560}
561
562/* setup a chain.
563 It is assumed that there is no concurrency while setting up the chain
564 requirement: auerchain_init()
565*/
566static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
567{
568 pauerchainelement_t acep;
569
570 dbg ("auerchain_setup called with %d elements", numElements);
571
572 /* fill the list of free elements */
573 for (;numElements; numElements--) {
574 acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
575 if (!acep)
576 goto ac_fail;
577 INIT_LIST_HEAD (&acep->list);
578 list_add_tail (&acep->list, &acp->free_list);
579 }
580 return 0;
581
582ac_fail:/* free the elements */
583 while (!list_empty (&acp->free_list)) {
584 /* get the next entry */
585 struct list_head *tmp = acp->free_list.next;
586 list_del (tmp);
587 acep = list_entry (tmp, auerchainelement_t, list);
588 kfree (acep);
589 }
590 return -ENOMEM;
591}
592
593
594/* completion handler for synchronous chained URBs */
/* Completion handler used by auerchain_start_wait_urb(): set the done
   flag and wake the sleeping submitter. */
static void auerchain_blocking_completion (struct urb *urb)
{
	pauerchain_chs_t pchs = urb->context;
	pchs->done = 1;
	wmb();	/* make done visible before the wakeup */
	wake_up (&pchs->wqh);
}
602
603
604/* Starts chained urb and waits for completion or timeout */
/* Submit @urb through chain @acp and sleep until it completes or
   @timeout (jiffies) elapses; on timeout the urb is unlinked from the
   chain. Overwrites urb->context with a stack-local completion state.
   Returns the urb status, the submit error, or -ETIMEDOUT; on success
   (status >= 0) *actual_length receives the transferred byte count. */
static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
{
	auerchain_chs_t chs;
	int status;

	dbg ("auerchain_start_wait_urb called");
	init_waitqueue_head (&chs.wqh);
	chs.done = 0;

	urb->context = &chs;
	status = auerchain_submit_urb (acp, urb);
	if (status)
		/* something went wrong */
		return status;

	timeout = wait_event_timeout(chs.wqh, chs.done, timeout);

	if (!timeout && !chs.done) {
		if (urb->status != -EINPROGRESS) {	/* No callback?!! */
			dbg ("auerchain_start_wait_urb: raced timeout");
			status = urb->status;
		} else {
			dbg ("auerchain_start_wait_urb: timeout");
			auerchain_unlink_urb (acp, urb); /* remove urb safely */
			status = -ETIMEDOUT;
		}
	} else
		status = urb->status;

	if (status >= 0)
		*actual_length = urb->actual_length;

	return status;
}
639
640
641/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
642 acp: pointer to the auerchain
643 dev: pointer to the usb device to send the message to
644 pipe: endpoint "pipe" to send the message to
645 request: USB message request value
646 requesttype: USB message request type value
647 value: USB message value
648 index: USB message index value
649 data: pointer to the data to send
650 size: length in bytes of the data to send
651 timeout: time to wait for the message to complete before timing out (if 0 the wait is forever)
652
653 This function sends a simple control message to a specified endpoint
654 and waits for the message to complete, or timeout.
655
656 If successful, it returns the transferred length, otherwise a negative error number.
657
658 Don't use this function from within an interrupt context, like a
659 bottom half handler. If you need an asynchronous message, or need to send
660 a message from within interrupt context, use auerchain_submit_urb()
661*/
static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
				  __u16 value, __u16 index, void *data, __u16 size, int timeout)
{
	int ret;
	struct usb_ctrlrequest *dr;
	struct urb *urb;
	/* written by auerchain_start_wait_urb() when the transfer succeeds */
	int uninitialized_var(length);

	dbg ("auerchain_control_msg");
	/* allocate the setup packet and the urb */
	dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;
	urb = usb_alloc_urb (0, GFP_KERNEL);
	if (!urb) {
		kfree (dr);
		return -ENOMEM;
	}

	dr->bRequestType = requesttype;
	dr->bRequest = request;
	dr->wValue = cpu_to_le16 (value);
	dr->wIndex = cpu_to_le16 (index);
	dr->wLength = cpu_to_le16 (size);

	/* the completion context (NULL here) is filled in later by
	   auerchain_start_wait_urb() */
	usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size,	/* build urb */
			      auerchain_blocking_completion, NULL);
	ret = auerchain_start_wait_urb (acp, urb, timeout, &length);

	usb_free_urb (urb);
	kfree (dr);

	if (ret < 0)
		return ret;
	else
		return length;
}
698
699
700/*-------------------------------------------------------------------*/
701/* Buffer List helper functions */
702
703/* free a single auerbuf */
704static void auerbuf_free (pauerbuf_t bp)
705{
706 kfree(bp->bufp);
707 kfree(bp->dr);
708 usb_free_urb(bp->urbp);
709 kfree(bp);
710}
711
712/* free the buffers from an auerbuf list */
713static void auerbuf_free_list (struct list_head *q)
714{
715 struct list_head *tmp;
716 struct list_head *p;
717 pauerbuf_t bp;
718
719 dbg ("auerbuf_free_list");
720 for (p = q->next; p != q;) {
721 bp = list_entry (p, auerbuf_t, buff_list);
722 tmp = p->next;
723 list_del (p);
724 p = tmp;
725 auerbuf_free (bp);
726 }
727}
728
729/* init the members of a list control block */
730static void auerbuf_init (pauerbufctl_t bcp)
731{
732 dbg ("auerbuf_init");
733 spin_lock_init (&bcp->lock);
734 INIT_LIST_HEAD (&bcp->free_buff_list);
735 INIT_LIST_HEAD (&bcp->rec_buff_list);
736}
737
738/* free all buffers from an auerbuf chain */
739static void auerbuf_free_buffers (pauerbufctl_t bcp)
740{
741 unsigned long flags;
742 dbg ("auerbuf_free_buffers");
743
744 spin_lock_irqsave (&bcp->lock, flags);
745
746 auerbuf_free_list (&bcp->free_buff_list);
747 auerbuf_free_list (&bcp->rec_buff_list);
748
749 spin_unlock_irqrestore (&bcp->lock, flags);
750}
751
752/* setup a list of buffers */
753/* requirement: auerbuf_init() */
754static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
755{
756 pauerbuf_t bep = NULL;
757
758 dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
759
760 /* fill the list of free elements */
761 for (;numElements; numElements--) {
762 bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
763 if (!bep)
764 goto bl_fail;
765 bep->list = bcp;
766 INIT_LIST_HEAD (&bep->buff_list);
767 bep->bufp = kmalloc (bufsize, GFP_KERNEL);
768 if (!bep->bufp)
769 goto bl_fail;
770 bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
771 if (!bep->dr)
772 goto bl_fail;
773 bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
774 if (!bep->urbp)
775 goto bl_fail;
776 list_add_tail (&bep->buff_list, &bcp->free_buff_list);
777 }
778 return 0;
779
780bl_fail:/* not enough memory. Free allocated elements */
781 dbg ("auerbuf_setup: no more memory");
782 auerbuf_free(bep);
783 auerbuf_free_buffers (bcp);
784 return -ENOMEM;
785}
786
787/* insert a used buffer into the free list */
788static void auerbuf_releasebuf( pauerbuf_t bp)
789{
790 unsigned long flags;
791 pauerbufctl_t bcp = bp->list;
792 bp->retries = 0;
793
794 dbg ("auerbuf_releasebuf called");
795 spin_lock_irqsave (&bcp->lock, flags);
796 list_add_tail (&bp->buff_list, &bcp->free_buff_list);
797 spin_unlock_irqrestore (&bcp->lock, flags);
798}
799
800
801/*-------------------------------------------------------------------*/
802/* Completion handlers */
803
804/* Values of urb->status or results of usb_submit_urb():
8050 Initial, OK
806-EINPROGRESS during submission until end
807-ENOENT if urb is unlinked
808-ETIME Device did not respond
809-ENOMEM Memory Overflow
810-ENODEV Specified USB-device or bus doesn't exist
811-ENXIO URB already queued
812-EINVAL a) Invalid transfer type specified (or not supported)
813 b) Invalid interrupt interval (0n256)
814-EAGAIN a) Specified ISO start frame too early
815 b) (using ISO-ASAP) Too much scheduled for the future wait some time and try again.
816-EFBIG Too much ISO frames requested (currently uhci900)
817-EPIPE Specified pipe-handle/Endpoint is already stalled
818-EMSGSIZE Endpoint message size is zero, do interface/alternate setting
819-EPROTO a) Bitstuff error
820 b) Unknown USB error
821-EILSEQ CRC mismatch
822-ENOSR Buffer error
823-EREMOTEIO Short packet detected
824-EXDEV ISO transfer only partially completed look at individual frame status for details
825-EINVAL ISO madness, if this happens: Log off and go home
826-EOVERFLOW babble
827*/
828
829/* check if a status code allows a retry */
/* Decide whether a transfer that finished with @status may be retried.
   Returns 1 for the retryable status codes below, 0 otherwise. */
static int auerswald_status_retry (int status)
{
	static const int retryable[] = {
		0, -ETIME, -EOVERFLOW, -EAGAIN, -EPIPE,
		-EPROTO, -EILSEQ, -ENOSR, -EREMOTEIO
	};
	unsigned int i;

	for (i = 0; i < sizeof(retryable) / sizeof(retryable[0]); i++) {
		if (status == retryable[i])
			return 1;	/* do a retry */
	}
	return 0;	/* no retry possible */
}
846
847/* Completion of asynchronous write block */
/* Completion of an asynchronous control write: recycle the buffer and
   wake any process waiting for a free buffer. */
static void auerchar_ctrlwrite_complete (struct urb * urb)
{
	pauerbuf_t bp = urb->context;
	/* open-coded container_of(): from the embedded bufctl back to the
	   owning device context */
	pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
	dbg ("auerchar_ctrlwrite_complete called");

	/* reuse the buffer */
	auerbuf_releasebuf (bp);
	/* Wake up all processes waiting for a buffer */
	wake_up (&cp->bufferwait);
}
859
860/* Completion handler for dummy retry packet */
/* Completion of the dummy retry control write (see
   auerswald_ctrlread_complete()): if the dummy itself failed beyond
   repair, drop the buffer; otherwise rebuild the original read request
   with the retry flag set and resubmit it at the head of the control
   chain. */
static void auerswald_ctrlread_wretcomplete (struct urb * urb)
{
	pauerbuf_t bp = urb->context;
	pauerswald_t cp;
	int ret;
	int status = urb->status;

	dbg ("auerswald_ctrlread_wretcomplete called");
	dbg ("complete with status: %d", status);
	/* open-coded container_of(): bufctl -> device context */
	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));

	/* check if it is possible to advance */
	if (!auerswald_status_retry(status) || !cp->usbdev) {
		/* reuse the buffer */
		err ("control dummy: transmission error %d, can not retry", status);
		auerbuf_releasebuf (bp);
		/* Wake up all processes waiting for a buffer */
		wake_up (&cp->bufferwait);
		return;
	}

	/* fill the control message */
	bp->dr->bRequestType = AUT_RREQ;
	bp->dr->bRequest = AUV_RBLOCK;
	bp->dr->wLength = bp->dr->wValue;	/* temporary stored */
	bp->dr->wValue = cpu_to_le16 (1);	/* Retry Flag */
	/* bp->dr->index = channel id; remains */
	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
			      (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
			      auerswald_ctrlread_complete,bp);

	/* submit the control msg as next paket */
	ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
	if (ret) {
		dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
		bp->urbp->status = ret;
		/* report the failure through the read completion */
		auerswald_ctrlread_complete (bp->urbp);
	}
}
900
901/* completion handler for receiving of control messages */
/* Completion of a chained control read. On error, either give up
   (releasing the buffer and waking buffer waiters) or schedule a retry:
   a long dummy control write is sent first, whose completion
   (auerswald_ctrlread_wretcomplete) resubmits the read with the retry
   flag. On success, dispatch the paket to the registered service and
   recycle the buffer. */
static void auerswald_ctrlread_complete (struct urb * urb)
{
	unsigned int  serviceid;
	pauerswald_t  cp;
	pauerscon_t scp;
	pauerbuf_t bp = urb->context;
	int status = urb->status;
	int ret;

	dbg ("auerswald_ctrlread_complete called");

	/* open-coded container_of(): bufctl -> device context */
	cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));

	/* check if there is valid data in this urb */
	if (status) {
		dbg ("complete with non-zero status: %d", status);
		/* should we do a retry? */
		if (!auerswald_status_retry(status)
		 || !cp->usbdev
		 || (cp->version < AUV_RETRY)
		 || (bp->retries >= AU_RETRIES)) {
			/* reuse the buffer */
			err ("control read: transmission error %d, can not retry", status);
			auerbuf_releasebuf (bp);
			/* Wake up all processes waiting for a buffer */
			wake_up (&cp->bufferwait);
			return;
		}
		bp->retries++;
		dbg ("Retry count = %d", bp->retries);
		/* send a long dummy control-write-message to allow device firmware to react */
		bp->dr->bRequestType = AUT_WREQ;
		bp->dr->bRequest = AUV_DUMMY;
		bp->dr->wValue = bp->dr->wLength; /* temporary storage */
		// bp->dr->wIndex    channel ID remains
		bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
		usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
  			(unsigned char*)bp->dr, bp->bufp, 32,
	   		auerswald_ctrlread_wretcomplete,bp);

		/* submit the control msg as next paket */
		ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
		if (ret) {
			dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
			bp->urbp->status = ret;
			auerswald_ctrlread_wretcomplete (bp->urbp);
		}
		return;
	}

	/* get the actual bytecount (incl. headerbyte) */
	bp->len = urb->actual_length;
	serviceid = bp->bufp[0] & AUH_TYPEMASK;
	dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len);

	/* dispatch the paket */
	scp = cp->services[serviceid];
	if (scp) {
		/* look, Ma, a listener! */
		scp->dispatch (scp, bp);
	}

	/* release the paket */
	auerbuf_releasebuf (bp);
	/* Wake up all processes waiting for a buffer */
	wake_up (&cp->bufferwait);
}
969
970/*-------------------------------------------------------------------*/
971/* Handling of Interrupt Endpoint */
972/* This interrupt Endpoint is used to inform the host about waiting
973 messages from the USB device.
974*/
975/* int completion handler. */
/* Interrupt endpoint completion: validate the "block ready" message
   from the device, take a free buffer and start a chained control read
   for the announced paket, then resubmit the interrupt urb. */
static void auerswald_int_complete (struct urb * urb)
{
	unsigned long flags;
	unsigned  int channelid;
	unsigned  int bytecount;
	int ret;
	int status = urb->status;
	pauerbuf_t   bp = NULL;
	pauerswald_t cp = urb->context;

	dbg ("%s called", __func__);

	switch (status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __func__, status);
		return;
	default:
		dbg("%s - nonzero urb status received: %d", __func__, status);
		goto exit;
	}

	/* check if all needed data was received */
	if (urb->actual_length < AU_IRQMINSIZE) {
		dbg ("invalid data length received: %d bytes", urb->actual_length);
		goto exit;
	}

	/* check the command code */
	if (cp->intbufp[0] != AU_IRQCMDID) {
		dbg ("invalid command received: %d", cp->intbufp[0]);
		goto exit;
	}

	/* check the command type */
	if (cp->intbufp[1] != AU_BLOCKRDY) {
		dbg ("invalid command type received: %d", cp->intbufp[1]);
		goto exit;
	}

	/* now extract the information */
	channelid = cp->intbufp[2];
	/* 16-bit little-endian byte count assembled from two bytes */
	bytecount = (unsigned char)cp->intbufp[3];
	bytecount |= (unsigned char)cp->intbufp[4] << 8;

	/* check the channel id */
	if (channelid >= AUH_TYPESIZE) {
		dbg ("invalid channel id received: %d", channelid);
		goto exit;
	}

	/* check the byte count */
	if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
		dbg ("invalid byte count received: %d", bytecount);
		goto exit;
	}
	dbg ("Service Channel = %d", channelid);
	dbg ("Byte Count = %d", bytecount);

	/* get a buffer for the next data paket */
	spin_lock_irqsave (&cp->bufctl.lock, flags);
	if (!list_empty (&cp->bufctl.free_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = cp->bufctl.free_buff_list.next;
		list_del (tmp);
		bp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&cp->bufctl.lock, flags);

	/* if no buffer available: skip it */
	if (!bp) {
		dbg ("auerswald_int_complete: no data buffer available");
		/* can we do something more?
		   This is a big problem: if this int packet is ignored, the
		   device will wait forever and not signal any more data.
		   The only real solution is: having enough buffers!
		   Or perhaps temporary disabling the int endpoint?
		*/
		goto exit;
	}

	/* fill the control message */
	bp->dr->bRequestType = AUT_RREQ;
	bp->dr->bRequest = AUV_RBLOCK;
	bp->dr->wValue = cpu_to_le16 (0);
	bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
	bp->dr->wLength = cpu_to_le16 (bytecount);
	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
			      (unsigned char*)bp->dr, bp->bufp, bytecount,
			      auerswald_ctrlread_complete,bp);

	/* submit the control msg */
	ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
	if (ret) {
		dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
		bp->urbp->status = ret;
		auerswald_ctrlread_complete( bp->urbp);
		/* here applies the same problem as above: device locking! */
	}
exit:
	/* keep the interrupt endpoint alive */
	ret = usb_submit_urb (urb, GFP_ATOMIC);
	if (ret)
		err ("%s - usb_submit_urb failed with result %d",
		     __func__, ret);
}
1086
1087/* int memory deallocation
1088 NOTE: no mutex please!
1089*/
1090static void auerswald_int_free (pauerswald_t cp)
1091{
1092 if (cp->inturbp) {
1093 usb_free_urb(cp->inturbp);
1094 cp->inturbp = NULL;
1095 }
1096 kfree(cp->intbufp);
1097 cp->intbufp = NULL;
1098}
1099
1100/* This function is called to activate the interrupt
1101 endpoint. This function returns 0 if successful or an error code.
1102 NOTE: no mutex please!
1103*/
/* Activate the interrupt IN endpoint AU_IRQENDP: look up the endpoint,
   allocate urb and data buffer (only if not yet present) and submit
   the interrupt urb. Returns 0 if successful or a negative error code;
   on failure the int resources are freed again.
   NOTE: no mutex please!
*/
static int auerswald_int_open (pauerswald_t cp)
{
	int ret;
	struct usb_host_endpoint *ep;
	int irqsize;
	dbg ("auerswald_int_open");

	ep = cp->usbdev->ep_in[AU_IRQENDP];
	if (!ep) {
		ret = -EFAULT;
		goto intoend;
	}
	irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
	cp->irqsize = irqsize;

	/* allocate the urb and data buffer */
	if (!cp->inturbp) {
		cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
		if (!cp->inturbp) {
			ret = -ENOMEM;
			goto intoend;
		}
	}
	if (!cp->intbufp) {
		cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
		if (!cp->intbufp) {
			ret = -ENOMEM;
			goto intoend;
		}
	}
	/* setup urb */
	usb_fill_int_urb (cp->inturbp, cp->usbdev,
			usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
			irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
	/* start the urb */
	cp->inturbp->status = 0;	/* needed! */
	ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);

intoend:
	if (ret < 0) {
		/* activation of interrupt endpoint has failed. Now clean up. */
		dbg ("auerswald_int_open: activation of int endpoint failed");

		/* deallocate memory */
		auerswald_int_free (cp);
	}
	return ret;
}
1152
1153/* This function is called to deactivate the interrupt
1154 endpoint. This function returns 0 if successful or an error code.
1155 NOTE: no mutex please!
1156*/
1157static void auerswald_int_release (pauerswald_t cp)
1158{
1159 dbg ("auerswald_int_release");
1160
1161 /* stop the int endpoint */
1162 usb_kill_urb (cp->inturbp);
1163
1164 /* deallocate memory */
1165 auerswald_int_free (cp);
1166}
1167
1168/* --------------------------------------------------------------------- */
1169/* Helper functions */
1170
1171/* wake up waiting readers */
/* Service disconnect callback: mark the character device context as
   removed and wake up readers blocked on readwait. */
static void auerchar_disconnect (pauerscon_t scp)
{
	/* open-coded container_of(): from the embedded service context
	   back to the owning character device context */
	pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
	dbg ("auerchar_disconnect called");
	ccp->removed = 1;
	wake_up (&ccp->readwait);
}
1179
1180
1181/* dispatch a read paket to a waiting character device */
/* Dispatch a received control read paket to this character device:
   take a buffer from the per-open free list, move the payload into it
   by swapping data pointers (all buffers have equal size), queue it on
   the receive list and wake blocked readers. If no buffer is free the
   paket is dropped. */
static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
{
	unsigned long flags;
	pauerchar_t ccp;
	pauerbuf_t newbp = NULL;
	char * charp;
	dbg ("auerchar_ctrlread_dispatch called");
	/* open-coded container_of(): service context -> char device context */
	ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));

	/* get a read buffer from character device context */
	spin_lock_irqsave (&ccp->bufctl.lock, flags);
	if (!list_empty (&ccp->bufctl.free_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = ccp->bufctl.free_buff_list.next;
		list_del (tmp);
		newbp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);

	if (!newbp) {
		dbg ("No read buffer available, discard paket!");
		return;     /* no buffer, no dispatch */
	}

	/* copy information to new buffer element
	   (all buffers have the same length) */
	charp = newbp->bufp;
	newbp->bufp = bp->bufp;
	bp->bufp = charp;
	newbp->len = bp->len;

	/* insert new buffer in read list */
	spin_lock_irqsave (&ccp->bufctl.lock, flags);
	list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
	dbg ("read buffer appended to rec_list");

	/* wake up pending synchronous reads */
	wake_up (&ccp->readwait);
}
1222
1223
1224/* Delete an auerswald driver context */
1225static void auerswald_delete( pauerswald_t cp)
1226{
1227 dbg( "auerswald_delete");
1228 if (cp == NULL)
1229 return;
1230
1231 /* Wake up all processes waiting for a buffer */
1232 wake_up (&cp->bufferwait);
1233
1234 /* Cleaning up */
1235 auerswald_int_release (cp);
1236 auerchain_free (&cp->controlchain);
1237 auerbuf_free_buffers (&cp->bufctl);
1238
1239 /* release the memory */
1240 kfree( cp);
1241}
1242
1243
1244/* Delete an auerswald character context */
1245static void auerchar_delete( pauerchar_t ccp)
1246{
1247 dbg ("auerchar_delete");
1248 if (ccp == NULL)
1249 return;
1250
1251 /* wake up pending synchronous reads */
1252 ccp->removed = 1;
1253 wake_up (&ccp->readwait);
1254
1255 /* remove the read buffer */
1256 if (ccp->readbuf) {
1257 auerbuf_releasebuf (ccp->readbuf);
1258 ccp->readbuf = NULL;
1259 }
1260
1261 /* remove the character buffers */
1262 auerbuf_free_buffers (&ccp->bufctl);
1263
1264 /* release the memory */
1265 kfree( ccp);
1266}
1267
1268
1269/* add a new service to the device
1270 scp->id must be set!
1271 return: 0 if OK, else error code
1272*/
/* Register service @scp with device @cp (scp->id must be preset):
   claim the service slot, then open the channel inside the device via
   a synchronous control message. The slot is released again if the
   control message fails.
   Returns 0 if OK, else a negative error code. */
static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
{
	int ret;

	/* is the device available? */
	if (!cp->usbdev) {
		dbg ("usbdev == NULL");
		return -EIO;	/*no: can not add a service, sorry*/
	}

	/* is the service available? */
	if (cp->services[scp->id]) {
		dbg ("service is busy");
		return -EBUSY;
	}

	/* device is available, service is free */
	cp->services[scp->id] = scp;

	/* register service in device */
	ret = auerchain_control_msg(
		&cp->controlchain,			/* pointer to control chain */
		cp->usbdev,				/* pointer to device */
		usb_sndctrlpipe (cp->usbdev, 0),	/* pipe to control endpoint */
		AUV_CHANNELCTL,				/* USB message request value */
		AUT_WREQ,				/* USB message request type value */
		0x01,					/* open	USB message value */
		scp->id,				/* USB message index value */
		NULL,					/* pointer to the data to send */
		0,					/* length in bytes of the data to send */
		HZ * 2);				/* time to wait for the message to complete before timing out */
	if (ret < 0) {
		dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
		/* undo above actions */
		cp->services[scp->id] = NULL;
		return ret;
	}

	dbg ("auerswald_addservice: channel open OK");
	return 0;
}
1314
1315
1316/* remove a service from the device
1317 scp->id must be set! */
/* Unregister service @scp from device @cp (scp->id must be set): if
   the device is still present, close the channel inside the device,
   then clear the service slot and mark the id unassigned. A service
   whose id is already AUH_UNASSIGNED is ignored. */
static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
{
	dbg ("auerswald_removeservice called");

	/* check if we have a service allocated */
	if (scp->id == AUH_UNASSIGNED)
		return;

	/* If there is a device: close the channel */
	if (cp->usbdev) {
		/* Close the service channel inside the device */
		int ret = auerchain_control_msg(
			&cp->controlchain,			/* pointer to control chain */
			cp->usbdev,				/* pointer to device */
			usb_sndctrlpipe (cp->usbdev, 0),	/* pipe to control endpoint */
			AUV_CHANNELCTL,				/* USB message request value */
			AUT_WREQ,				/* USB message request type value */
			0x00,	// close			/* USB message value */
			scp->id,				/* USB message index value */
			NULL,					/* pointer to the data to send */
			0,					/* length in bytes of the data to send */
			HZ * 2);				/* time to wait for the message to complete before timing out */
		if (ret < 0) {
			dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
		}
		else {
			dbg ("auerswald_removeservice: channel close OK");
		}
	}

	/* remove the service from the device */
	cp->services[scp->id] = NULL;
	scp->id = AUH_UNASSIGNED;
}
1352
1353
1354/* --------------------------------------------------------------------- */
1355/* Char device functions */
1356
/* Open a new character device.
   Looks up the USB interface for the minor number, allocates and
   initializes a per-open auerchar context, and attaches it to the
   device context.  Returns 0 on success or a negative errno. */
static int auerchar_open (struct inode *inode, struct file *file)
{
	int dtindex = iminor(inode);
	pauerswald_t cp = NULL;
	pauerchar_t ccp = NULL;
	struct usb_interface *intf;
	int ret;

	/* minor number in range? */
	if (dtindex < 0) {
		return -ENODEV;
	}
	/* map the minor number back to the bound USB interface */
	intf = usb_find_interface(&auerswald_driver, dtindex);
	if (!intf) {
		return -ENODEV;
	}

	/* usb device available? (intfdata is cleared on disconnect) */
	cp = usb_get_intfdata (intf);
	if (cp == NULL) {
		return -ENODEV;
	}
	if (mutex_lock_interruptible(&cp->mutex)) {
		return -ERESTARTSYS;
	}

	/* we have access to the device. Now lets allocate memory */
	ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
	if (ccp == NULL) {
		err ("out of memory");
		ret = -ENOMEM;
		goto ofail;
	}

	/* Initialize device descriptor */
	mutex_init(&ccp->mutex);
	mutex_init(&ccp->readmutex);
	auerbuf_init (&ccp->bufctl);
	/* no service channel is connected yet; IOCTL_AU_SERVREQ sets one up */
	ccp->scontext.id = AUH_UNASSIGNED;
	ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
	ccp->scontext.disconnect = auerchar_disconnect;
	init_waitqueue_head (&ccp->readwait);

	/* preallocate the receive buffers for this open context */
	ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
	if (ret) {
		goto ofail;
	}

	/* open_count keeps the device context alive until the last release() */
	cp->open_count++;
	ccp->auerdev = cp;
	dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
	mutex_unlock(&cp->mutex);

	/* file IO stuff */
	file->f_pos = 0;
	file->private_data = ccp;
	return nonseekable_open(inode, file);

	/* Error exit: release the device mutex and free the half-built context */
ofail:	mutex_unlock(&cp->mutex);
	auerchar_delete (ccp);
	return ret;
}
1421
1422
/* IOCTL functions */
/* Dispatch the character-device ioctls.
   Lock order: ccp->mutex first, then cp->mutex (same order as the
   other file operations).  Fails with -ENODEV once the USB device
   has been disconnected.  The BKL is additionally held around the
   command dispatch.
   NOTE(review): the two mutexes appear to already serialize these
   commands; whether lock_kernel() is still required here should be
   confirmed against the callers of this dispatch. */
static long auerchar_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	pauerchar_t ccp = (pauerchar_t) file->private_data;
	int ret = 0;
	audevinfo_t devinfo;
	pauerswald_t cp = NULL;
	unsigned int u;
	unsigned int __user *user_arg = (unsigned int __user *)arg;

	dbg ("ioctl");

	/* get the mutexes */
	if (mutex_lock_interruptible(&ccp->mutex)) {
		return -ERESTARTSYS;
	}
	cp = ccp->auerdev;
	if (!cp) {
		mutex_unlock(&ccp->mutex);
		return -ENODEV;
	}
	if (mutex_lock_interruptible(&cp->mutex)) {
		mutex_unlock(&ccp->mutex);
		return -ERESTARTSYS;
	}

	/* Check for removal */
	if (!cp->usbdev) {
		mutex_unlock(&cp->mutex);
		mutex_unlock(&ccp->mutex);
		return -ENODEV;
	}
	lock_kernel();
	switch (cmd) {

	/* return != 0 if Transmitt channel ready to send */
	case IOCTL_AU_TXREADY:
		dbg ("IOCTL_AU_TXREADY");
		/* writable iff still attached, a service channel is
		   connected, and a free write buffer is available */
		u   = ccp->auerdev
		   && (ccp->scontext.id != AUH_UNASSIGNED)
		   && !list_empty (&cp->bufctl.free_buff_list);
		ret = put_user (u, user_arg);
		break;

	/* return != 0 if connected to a service channel */
	case IOCTL_AU_CONNECT:
		dbg ("IOCTL_AU_CONNECT");
		u = (ccp->scontext.id != AUH_UNASSIGNED);
		ret = put_user (u, user_arg);
		break;

	/* return != 0 if Receive Data available */
	case IOCTL_AU_RXAVAIL:
		dbg ("IOCTL_AU_RXAVAIL");
		if (ccp->scontext.id == AUH_UNASSIGNED) {
			ret = -EIO;
			break;
		}
		u = 0;	/* no data */
		/* data counts as available if the current read buffer
		   still has unread bytes ... */
		if (ccp->readbuf) {
			int restlen = ccp->readbuf->len - ccp->readoffset;
			if (restlen > 0)
				u = 1;
		}
		/* ... or if a further received buffer is queued */
		if (!u) {
			if (!list_empty (&ccp->bufctl.rec_buff_list)) {
				u = 1;
			}
		}
		ret = put_user (u, user_arg);
		break;

	/* return the max. buffer length for the device */
	case IOCTL_AU_BUFLEN:
		dbg ("IOCTL_AU_BUFLEN");
		u = cp->maxControlLength;
		ret = put_user (u, user_arg);
		break;

	/* requesting a service channel */
	case IOCTL_AU_SERVREQ:
		dbg ("IOCTL_AU_SERVREQ");
		/* requesting a service means: release the previous one first */
		auerswald_removeservice (cp, &ccp->scontext);
		/* get the channel number */
		ret = get_user (u, user_arg);
		if (ret) {
			break;
		}
		/* only user-assignable channel ids are accepted */
		if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
			ret = -EIO;
			break;
		}
		dbg ("auerchar service request parameters are ok");
		ccp->scontext.id = u;

		/* request the service now */
		ret = auerswald_addservice (cp, &ccp->scontext);
		if (ret) {
			/* no: revert service entry */
			ccp->scontext.id = AUH_UNASSIGNED;
		}
		break;

	/* get a string descriptor for the device */
	case IOCTL_AU_DEVINFO:
		dbg ("IOCTL_AU_DEVINFO");
		if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
			ret = -EFAULT;
			break;
		}
		/* copy at most the user's buffer size, including the NUL */
		u = strlen(cp->dev_desc)+1;
		if (u > devinfo.bsize) {
			u = devinfo.bsize;
		}
		ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
		break;

	/* get the max. string descriptor length */
	case IOCTL_AU_SLEN:
		dbg ("IOCTL_AU_SLEN");
		u = AUSI_DLEN;
		ret = put_user (u, user_arg);
		break;

	default:
		dbg ("IOCTL_AU_UNKNOWN");
		ret = -ENOTTY;
		break;
	}
	unlock_kernel();
	/* release the mutexes */
	mutex_unlock(&cp->mutex);
	mutex_unlock(&ccp->mutex);
	return ret;
}
1560
/* Read data from the device.
   Drains the current read buffer first; when it is exhausted, pulls
   the next received block from the bufctl list, or sleeps on
   ccp->readwait until data arrives (unless O_NONBLOCK).
   Only one reader per open context is allowed (ccp->readmutex).
   Returns the number of bytes copied, 0, or a negative errno. */
static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
	unsigned long flags;
	pauerchar_t ccp = (pauerchar_t) file->private_data;
	pauerbuf_t   bp = NULL;
	wait_queue_t wait;

	dbg ("auerchar_read");

	/* Error checking */
	if (!ccp)
		return -EIO;
	if (*ppos)
		return -ESPIPE;
	if (count == 0)
		return 0;

	/* get the mutex */
	if (mutex_lock_interruptible(&ccp->mutex))
		return -ERESTARTSYS;

	/* Can we expect to read something? */
	if (ccp->scontext.id == AUH_UNASSIGNED) {
		mutex_unlock(&ccp->mutex);
		return -EIO;
	}

	/* only one reader per device allowed */
	if (mutex_lock_interruptible(&ccp->readmutex)) {
		mutex_unlock(&ccp->mutex);
		return -ERESTARTSYS;
	}

	/* read data from readbuf, if available */
doreadbuf:
	bp = ccp->readbuf;
	if (bp) {
		/* read the maximum bytes */
		int restlen = bp->len - ccp->readoffset;
		if (restlen < 0)
			restlen = 0;
		if (count > restlen)
			count = restlen;
		if (count) {
			if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
				dbg ("auerswald_read: copy_to_user failed");
				mutex_unlock(&ccp->readmutex);
				mutex_unlock(&ccp->mutex);
				return -EFAULT;
			}
		}
		/* advance the read offset */
		ccp->readoffset += count;
		restlen -= count;
		// reuse the read buffer
		/* buffer fully consumed: give it back to the pool */
		if (restlen <= 0) {
			auerbuf_releasebuf (bp);
			ccp->readbuf = NULL;
		}
		/* return with number of bytes read */
		if (count) {
			mutex_unlock(&ccp->readmutex);
			mutex_unlock(&ccp->mutex);
			return count;
		}
	}

	/* a read buffer is not available. Try to get the next data block. */
doreadlist:
	/* Preparing for sleep */
	/* queue ourselves on the readwait waitqueue BEFORE checking the
	   list, so a wakeup between the check and schedule() is not lost */
	init_waitqueue_entry (&wait, current);
	set_current_state (TASK_INTERRUPTIBLE);
	add_wait_queue (&ccp->readwait, &wait);

	bp = NULL;
	spin_lock_irqsave (&ccp->bufctl.lock, flags);
	if (!list_empty (&ccp->bufctl.rec_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
		list_del (tmp);
		bp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&ccp->bufctl.lock, flags);

	/* have we got data? */
	if (bp) {
		ccp->readbuf = bp;
		ccp->readoffset = AUH_SIZE; /* for headerbyte */
		set_current_state (TASK_RUNNING);
		remove_wait_queue (&ccp->readwait, &wait);
		goto doreadbuf;		/* now we can read! */
	}

	/* no data available. Should we wait? */
	if (file->f_flags & O_NONBLOCK) {
		dbg ("No read buffer available, returning -EAGAIN");
		set_current_state (TASK_RUNNING);
		remove_wait_queue (&ccp->readwait, &wait);
		mutex_unlock(&ccp->readmutex);
		mutex_unlock(&ccp->mutex);
		return -EAGAIN;  /* nonblocking, no data available */
	}

	/* yes, we should wait! */
	mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
	schedule();
	remove_wait_queue (&ccp->readwait, &wait);
	if (signal_pending (current)) {
		/* waked up by a signal */
		mutex_unlock(&ccp->readmutex);
		return -ERESTARTSYS;
	}

	/* Anything left to read? */
	/* the service may have been disconnected while we slept */
	if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
		mutex_unlock(&ccp->readmutex);
		return -EIO;
	}

	/* re-acquire the context mutex that was dropped before sleeping */
	if (mutex_lock_interruptible(&ccp->mutex)) {
		mutex_unlock(&ccp->readmutex);
		return -ERESTARTSYS;
	}

	/* try to read the incoming data again */
	goto doreadlist;
}
1689
1690
/* Write a data block into the right service channel of the device.
   Grabs a buffer from the device-wide pool (cp->bufctl), prepends the
   AUH_SIZE service header, and submits an asynchronous control URB
   through the control chain.  Blocks on cp->bufferwait when no buffer
   is free (unless O_NONBLOCK).  Writes larger than maxControlLength
   are silently truncated to that size.
   Returns the number of payload bytes accepted or a negative errno. */
static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
{
	pauerchar_t ccp = (pauerchar_t) file->private_data;
	pauerswald_t cp = NULL;
	pauerbuf_t bp;
	unsigned long flags;
	int ret;
	wait_queue_t wait;

	dbg ("auerchar_write %zd bytes", len);

	/* Error checking */
	if (!ccp)
		return -EIO;
	if (*ppos)
		return -ESPIPE;
	if (len == 0)
		return 0;

write_again:
	/* get the mutex */
	if (mutex_lock_interruptible(&ccp->mutex))
		return -ERESTARTSYS;

	/* Can we expect to write something? */
	if (ccp->scontext.id == AUH_UNASSIGNED) {
		mutex_unlock(&ccp->mutex);
		return -EIO;
	}

	cp = ccp->auerdev;
	if (!cp) {
		mutex_unlock(&ccp->mutex);
		return -ERESTARTSYS;
	}
	if (mutex_lock_interruptible(&cp->mutex)) {
		mutex_unlock(&ccp->mutex);
		return -ERESTARTSYS;
	}
	if (!cp->usbdev) {
		mutex_unlock(&cp->mutex);
		mutex_unlock(&ccp->mutex);
		return -EIO;
	}
	/* Prepare for sleep */
	/* enqueue on bufferwait before probing the free list so a buffer
	   released in between does not cause a missed wakeup */
	init_waitqueue_entry (&wait, current);
	set_current_state (TASK_INTERRUPTIBLE);
	add_wait_queue (&cp->bufferwait, &wait);

	/* Try to get a buffer from the device pool.
	   We can't use a buffer from ccp->bufctl because the write
	   command will last beond a release() */
	bp = NULL;
	spin_lock_irqsave (&cp->bufctl.lock, flags);
	if (!list_empty (&cp->bufctl.free_buff_list)) {
		/* yes: get the entry */
		struct list_head *tmp = cp->bufctl.free_buff_list.next;
		list_del (tmp);
		bp = list_entry (tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore (&cp->bufctl.lock, flags);

	/* are there any buffers left? */
	if (!bp) {
		mutex_unlock(&cp->mutex);
		mutex_unlock(&ccp->mutex);

		/* NONBLOCK: don't wait */
		if (file->f_flags & O_NONBLOCK) {
			set_current_state (TASK_RUNNING);
			remove_wait_queue (&cp->bufferwait, &wait);
			return -EAGAIN;
		}

		/* BLOCKING: wait */
		schedule();
		remove_wait_queue (&cp->bufferwait, &wait);
		if (signal_pending (current)) {
			/* waked up by a signal */
			return -ERESTARTSYS;
		}
		/* retry the whole operation; the device may have gone away
		   while we slept, which the checks above will catch */
		goto write_again;
	} else {
		set_current_state (TASK_RUNNING);
		remove_wait_queue (&cp->bufferwait, &wait);
	}

	/* protect against too big write requests */
	if (len > cp->maxControlLength)
		len = cp->maxControlLength;

	/* Fill the buffer */
	if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
		dbg ("copy_from_user failed");
		auerbuf_releasebuf (bp);
		/* Wake up all processes waiting for a buffer */
		wake_up (&cp->bufferwait);
		mutex_unlock(&cp->mutex);
		mutex_unlock(&ccp->mutex);
		return -EFAULT;
	}

	/* set the header byte */
	*(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;

	/* Set the transfer Parameters */
	bp->len = len+AUH_SIZE;
	bp->dr->bRequestType = AUT_WREQ;
	bp->dr->bRequest     = AUV_WBLOCK;
	bp->dr->wValue       = cpu_to_le16 (0);
	bp->dr->wIndex       = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
	bp->dr->wLength      = cpu_to_le16 (len+AUH_SIZE);
	usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
			      (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
			      auerchar_ctrlwrite_complete, bp);
	/* up we go */
	/* the buffer is released by the completion handler on success */
	ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
	mutex_unlock(&cp->mutex);
	if (ret) {
		dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
		auerbuf_releasebuf (bp);
		/* Wake up all processes waiting for a buffer */
		wake_up (&cp->bufferwait);
		mutex_unlock(&ccp->mutex);
		return -EIO;
	}
	else {
		dbg ("auerchar_write: Write OK");
		mutex_unlock(&ccp->mutex);
		return len;
	}
}
1824
1825
1826/* Close a character device */
1827static int auerchar_release (struct inode *inode, struct file *file)
1828{
1829 pauerchar_t ccp = (pauerchar_t) file->private_data;
1830 pauerswald_t cp;
1831 dbg("release");
1832
1833 mutex_lock(&ccp->mutex);
1834 cp = ccp->auerdev;
1835 if (cp) {
1836 mutex_lock(&cp->mutex);
1837 /* remove an open service */
1838 auerswald_removeservice (cp, &ccp->scontext);
1839 /* detach from device */
1840 if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
1841 /* usb device waits for removal */
1842 mutex_unlock(&cp->mutex);
1843 auerswald_delete (cp);
1844 } else {
1845 mutex_unlock(&cp->mutex);
1846 }
1847 cp = NULL;
1848 ccp->auerdev = NULL;
1849 }
1850 mutex_unlock(&ccp->mutex);
1851 auerchar_delete (ccp);
1852
1853 return 0;
1854}
1855
1856
1857/*----------------------------------------------------------------------*/
/* File operation structure */
/* Character-device entry points; the device is a non-seekable pipe-like
   stream, so llseek is disabled via no_llseek. */
static const struct file_operations auerswald_fops =
{
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.read =		auerchar_read,
	.write =	auerchar_write,
	.unlocked_ioctl = auerchar_ioctl,
	.open =		auerchar_open,
	.release =	auerchar_release,
};
1869
/* USB class driver: registers /dev/auerN device nodes with minors
   starting at AUER_MINOR_BASE (see usb_register_dev in probe). */
static struct usb_class_driver auerswald_class = {
	.name =		"auer%d",
	.fops =		&auerswald_fops,
	.minor_base =	AUER_MINOR_BASE,
};
1875
1876
1877/* --------------------------------------------------------------------- */
1878/* Special USB driver functions */
1879
/* Probe if this driver wants to serve an USB device

   This entry point is called whenever a new device is attached to the bus.
   Then the device driver has to create a new instance of its internal data
   structures for the new device.

   The dev argument specifies the device context, which contains pointers
   to all USB descriptors. The interface argument specifies the interface
   number. If a USB driver wants to bind itself to a particular device and
   interface it has to return a pointer. This pointer normally references
   the device driver's context structure.

   Probing normally is done by checking the vendor and product identifications
   or the class and subclass definitions. If they match the interface number
   is compared with the ones supported by the driver. When probing is done
   class based it might be necessary to parse some more USB descriptors because
   the device properties can differ in a wide range.
*/
static int auerswald_probe (struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct usb_device *usbdev = interface_to_usbdev(intf);
	pauerswald_t cp = NULL;
	unsigned int u = 0;
	__le16 *pbuf;
	int ret;

	dbg ("probe: vendor id 0x%x, device id 0x%x",
	     le16_to_cpu(usbdev->descriptor.idVendor),
	     le16_to_cpu(usbdev->descriptor.idProduct));

	/* we use only the first -and only- interface */
	if (intf->altsetting->desc.bInterfaceNumber != 0)
		return -ENODEV;

	/* allocate memory for our device and initialize it */
	cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
	if (cp == NULL) {
		err ("out of memory");
		goto pfail;
	}

	/* Initialize device descriptor */
	mutex_init(&cp->mutex);
	cp->usbdev = usbdev;
	auerchain_init (&cp->controlchain);
	auerbuf_init (&cp->bufctl);
	init_waitqueue_head (&cp->bufferwait);

	/* claim a minor number and create the /dev node */
	ret = usb_register_dev(intf, &auerswald_class);
	if (ret) {
		err ("Not able to get a minor for this device.");
		goto pfail;
	}
	/* NOTE(review): on any later failure we jump to pfail without
	   calling usb_deregister_dev(); verify auerswald_delete() (not
	   visible here) releases the minor, otherwise it leaks. */

	/* Give the device a name */
	sprintf (cp->name, "usb/auer%d", intf->minor);

	/* Store the index */
	cp->dtindex = intf->minor;

	/* Get the usb version of the device */
	cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
	dbg ("Version is %X", cp->version);

	/* allow some time to settle the device */
	msleep(334);

	/* Try to get a suitable textual description of the device */
	/* Device name:*/
	/* NOTE(review): the fixed ",Ser# " / ", " separators are appended
	   without checking the remaining room in dev_desc; presumably
	   AUSI_DLEN leaves enough slack — confirm against the buffer size
	   declared in the header. */
	ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
	if (ret >= 0) {
		u += ret;
		/* Append Serial Number */
		memcpy(&cp->dev_desc[u], ",Ser# ", 6);
		u += 6;
		ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
		if (ret >= 0) {
			u += ret;
			/* Append subscriber number */
			memcpy(&cp->dev_desc[u], ", ", 2);
			u += 2;
			ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
			if (ret >= 0) {
				u += ret;
			}
		}
	}
	cp->dev_desc[u] = '\0';
	info("device is a %s", cp->dev_desc);

	/* get the maximum allowed control transfer length */
	pbuf = kmalloc(2, GFP_KERNEL);	/* use an allocated buffer because of urb target */
	if (!pbuf) {
		err( "out of memory");
		goto pfail;
	}
	ret = usb_control_msg(cp->usbdev,           /* pointer to device */
		usb_rcvctrlpipe( cp->usbdev, 0 ),   /* pipe to control endpoint */
		AUV_GETINFO,                        /* USB message request value */
		AUT_RREQ,                           /* USB message request type value */
		0,                                  /* USB message value */
		AUDI_MBCTRANS,                      /* USB message index value */
		pbuf,                               /* pointer to the receive buffer */
		2,                                  /* length of the buffer */
		2000);                            /* time to wait for the message to complete before timing out */
	if (ret == 2) {
		cp->maxControlLength = le16_to_cpup(pbuf);
		kfree(pbuf);
		dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
	} else {
		kfree(pbuf);
		err("setup: getting max. allowed control transfer length failed with error %d", ret);
		goto pfail;
	}

	/* allocate a chain for the control messages */
	if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
		err ("out of memory");
		goto pfail;
	}

	/* allocate buffers for control messages */
	if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
		err ("out of memory");
		goto pfail;
	}

	/* start the interrupt endpoint */
	if (auerswald_int_open (cp)) {
		err ("int endpoint failed");
		goto pfail;
	}

	/* all OK */
	usb_set_intfdata (intf, cp);
	return 0;

	/* Error exit: clean up the memory */
pfail:	auerswald_delete (cp);
	return -EIO;
}
2022
2023
/* Disconnect driver from a served device

   This function is called whenever a device which was served by this driver
   is disconnected.

   The argument dev specifies the device context and the driver_context
   returns a pointer to the previously registered driver_context of the
   probe function. After returning from the disconnect function the USB
   framework completely deallocates all data structures associated with
   this device. So especially the usb_device structure must not be used
   any longer by the usb driver.
*/
static void auerswald_disconnect (struct usb_interface *intf)
{
	pauerswald_t cp = usb_get_intfdata (intf);
	unsigned int u;

	/* clear intfdata first so new open() calls fail with -ENODEV */
	usb_set_intfdata (intf, NULL);
	if (!cp)
		return;

	/* give back our USB minor number */
	usb_deregister_dev(intf, &auerswald_class);

	mutex_lock(&cp->mutex);
	info ("device /dev/%s now disconnecting", cp->name);

	/* Stop the interrupt endpoint */
	auerswald_int_release (cp);

	/* remove the control chain allocated in auerswald_probe
	   This has the benefit of
	   a) all pending (a)synchronous urbs are unlinked
	   b) all buffers dealing with urbs are reclaimed
	*/
	auerchain_free (&cp->controlchain);

	if (cp->open_count == 0) {
		/* nobody is using this device. So we can clean up now */
		mutex_unlock(&cp->mutex);
		/* mutex_unlock() is possible here because no other task
		   can open the device (see above). I don't want
		   to kfree() a locked mutex. */

		auerswald_delete (cp);
	} else {
		/* device is used. Remove the pointer to the
		   usb device (it's not valid any more). The last
		   release() will do the clean up */
		cp->usbdev = NULL;
		mutex_unlock(&cp->mutex);
		/* Terminate waiting writers */
		wake_up (&cp->bufferwait);
		/* Inform all waiting readers */
		/* each registered service gets its disconnect callback so
		   blocked readers are woken and fail with -EIO */
		for ( u = 0; u < AUH_TYPESIZE; u++) {
			pauerscon_t scp = cp->services[u];
			if (scp)
				scp->disconnect( scp);
		}
	}
}
2085
/* Descriptor for the devices which are served by this driver.
   NOTE: this struct is parsed by the usbmanager install scripts.
   Don't change without caution!
   All entries share the Auerswald vendor id; the product id selects
   the PBX / system-telephone model. */
static struct usb_device_id auerswald_ids [] = {
	{ USB_DEVICE (ID_AUERSWALD, 0x00C0) },          /* COMpact 2104 USB */
	{ USB_DEVICE (ID_AUERSWALD, 0x00DB) },          /* COMpact 4410/2206 USB */
	{ USB_DEVICE (ID_AUERSWALD, 0x00DC) },          /* COMpact 4406 DSL */
	{ USB_DEVICE (ID_AUERSWALD, 0x00DD) },          /* COMpact 2204 USB */
	{ USB_DEVICE (ID_AUERSWALD, 0x00F1) },          /* Comfort 2000 System Telephone */
	{ USB_DEVICE (ID_AUERSWALD, 0x00F2) },          /* Comfort 1200 System Telephone */
	{ }			                        /* Terminating entry */
};
2099
/* Standard module device table */
/* exposes auerswald_ids to userspace/hotplug so the module is
   auto-loaded when a matching device appears */
MODULE_DEVICE_TABLE (usb, auerswald_ids);

/* Standard usb driver struct */
static struct usb_driver auerswald_driver = {
	.name =		"auerswald",
	.probe =	auerswald_probe,
	.disconnect =	auerswald_disconnect,
	.id_table =	auerswald_ids,
};
2110
2111
2112/* --------------------------------------------------------------------- */
2113/* Module loading/unloading */
2114
2115/* Driver initialisation. Called after module loading.
2116 NOTE: there is no concurrency at _init
2117*/
2118static int __init auerswald_init (void)
2119{
2120 int result;
2121 dbg ("init");
2122
2123 /* register driver at the USB subsystem */
2124 result = usb_register (&auerswald_driver);
2125 if (result < 0) {
2126 err ("driver could not be registered");
2127 return -1;
2128 }
2129 return 0;
2130}
2131
/* Driver deinit. Called before module removal.
   NOTE: there is no concurrency at _cleanup
   Unregistering triggers disconnect() for any still-bound devices. */
static void __exit auerswald_cleanup (void)
{
	dbg ("cleanup");
	usb_deregister (&auerswald_driver);
}
2140
2141/* --------------------------------------------------------------------- */
2142/* Linux device driver module description */
2143
/* Module metadata; DRIVER_AUTHOR / DRIVER_DESC are defined earlier in
   this file (outside this excerpt). */
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_LICENSE ("GPL");

module_init (auerswald_init);
module_exit (auerswald_cleanup);
2150
2151/* --------------------------------------------------------------------- */
2152
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index d94aa7387608..b897f6554ecd 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -48,7 +48,8 @@ static int isight_firmware_load(struct usb_interface *intf,
48 48
49 if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) { 49 if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
50 printk(KERN_ERR "Unable to load isight firmware\n"); 50 printk(KERN_ERR "Unable to load isight firmware\n");
51 return -ENODEV; 51 ret = -ENODEV;
52 goto out;
52 } 53 }
53 54
54 ptr = firmware->data; 55 ptr = firmware->data;
@@ -91,7 +92,6 @@ static int isight_firmware_load(struct usb_interface *intf,
91 buf, llen, 300) != llen) { 92 buf, llen, 300) != llen) {
92 printk(KERN_ERR 93 printk(KERN_ERR
93 "Failed to load isight firmware\n"); 94 "Failed to load isight firmware\n");
94 kfree(buf);
95 ret = -ENODEV; 95 ret = -ENODEV;
96 goto out; 96 goto out;
97 } 97 }
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 000000000000..faca4333f27a
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,176 @@
1#
2# USB Dual Role (OTG-ready) Controller Drivers
3# for silicon based on Mentor Graphics INVENTRA designs
4#
5
6comment "Enable Host or Gadget support to see Inventra options"
7 depends on !USB && USB_GADGET=n
8
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) && HAVE_CLK
12 select TWL4030_USB if MACH_OMAP_3430SDP
13 tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
14 help
15 Say Y here if your system has a dual role high speed USB
16 controller based on the Mentor Graphics silicon IP. Then
17 configure options to match your silicon and the board
18 it's being used with, including the USB peripheral role,
19 or the USB host role, or both.
20
21 Texas Instruments parts using this IP include DaVinci 644x,
22 OMAP 243x, OMAP 343x, and TUSB 6010.
23
24 If you do not know what this is, please say N.
25
26 To compile this driver as a module, choose M here; the
27 module will be called "musb_hdrc".
28
29config USB_MUSB_SOC
30 boolean
31 depends on USB_MUSB_HDRC
32 default y if ARCH_DAVINCI
33 default y if ARCH_OMAP2430
34 default y if ARCH_OMAP34XX
35 help
36 Use a static <asm/arch/hdrc_cnf.h> file to describe how the
37 controller is configured (endpoints, mechanisms, etc) on the
38 current iteration of a given system-on-chip.
39
40comment "DaVinci 644x USB support"
41 depends on USB_MUSB_HDRC && ARCH_DAVINCI
42
43comment "OMAP 243x high speed USB support"
44 depends on USB_MUSB_HDRC && ARCH_OMAP2430
45
46comment "OMAP 343x high speed USB support"
47 depends on USB_MUSB_HDRC && ARCH_OMAP34XX
48
49config USB_TUSB6010
50 boolean "TUSB 6010 support"
51 depends on USB_MUSB_HDRC && !USB_MUSB_SOC
52 default y
53 help
54 The TUSB 6010 chip, from Texas Instruments, connects a discrete
55 HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
56 (a high speed serial link). It can use system-specific external
57 DMA controllers.
58
59choice
60 prompt "Driver Mode"
61 depends on USB_MUSB_HDRC
62 help
63 Dual-Role devices can support both host and peripheral roles,
64 as well as a the special "OTG Device" role which can switch
65 between both roles as needed.
66
67# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
68# OTG needs both roles, not just USB_MUSB_HOST.
69config USB_MUSB_HOST
70 depends on USB
71 bool "USB Host"
72 help
73 Say Y here if your system supports the USB host role.
74 If it has a USB "A" (rectangular), "Mini-A" (uncommon),
75 or "Mini-AB" connector, it supports the host role.
76 (With a "Mini-AB" connector, you should enable USB OTG.)
77
78# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
79# side support ... OTG needs both roles
80config USB_MUSB_PERIPHERAL
81 depends on USB_GADGET
82 bool "USB Peripheral (gadget stack)"
83 select USB_GADGET_MUSB_HDRC
84 help
85 Say Y here if your system supports the USB peripheral role.
86 If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
87 connector, it supports the peripheral role.
88 (With a "Mini-AB" connector, you should enable USB OTG.)
89
90config USB_MUSB_OTG
91 depends on USB && USB_GADGET && PM && EXPERIMENTAL
92 bool "Both host and peripheral: USB OTG (On The Go) Device"
93 select USB_GADGET_MUSB_HDRC
94 select USB_OTG
95 help
96 The most notable feature of USB OTG is support for a
97 "Dual-Role" device, which can act as either a device
98 or a host. The initial role choice can be changed
99 later, when two dual-role devices talk to each other.
100
101 At this writing, the OTG support in this driver is incomplete,
102 omitting the mandatory HNP or SRP protocols. However, some
103 of the cable based role switching works. (That is, grounding
104 the ID pin switches the controller to host mode, while leaving
105 it floating leaves it in peripheral mode.)
106
107 Select this if your system has a Mini-AB connector, or
108 to simplify certain kinds of configuration.
109
110 To implement your OTG Targeted Peripherals List (TPL), enable
111 USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
112 to match your requirements.
113
114endchoice
115
116# enable peripheral support (including with OTG)
117config USB_GADGET_MUSB_HDRC
118 bool
119 depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
120# default y
121# select USB_GADGET_DUALSPEED
122# select USB_GADGET_SELECTED
123
124# enables host support (including with OTG)
125config USB_MUSB_HDRC_HCD
126 bool
127 depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
128 select USB_OTG if USB_GADGET_MUSB_HDRC
129 default y
130
131
132config MUSB_PIO_ONLY
133 bool 'Disable DMA (always use PIO)'
134 depends on USB_MUSB_HDRC
135 default y if USB_TUSB6010
136 help
137 All data is copied between memory and FIFO by the CPU.
138 DMA controllers are ignored.
139
140 Do not select 'n' here unless DMA support for your SOC or board
141 is unavailable (or unstable). When DMA is enabled at compile time,
142 you can still disable it at run time using the "use_dma=n" module
143 parameter.
144
145config USB_INVENTRA_DMA
146 bool
147 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
148 default ARCH_OMAP2430 || ARCH_OMAP34XX
149 help
150 Enable DMA transfers using Mentor's engine.
151
152config USB_TI_CPPI_DMA
153 bool
154 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
155 default ARCH_DAVINCI
156 help
157 Enable DMA transfers when TI CPPI DMA is available.
158
159config USB_TUSB_OMAP_DMA
160 bool
161 depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
162 depends on USB_TUSB6010
163 depends on ARCH_OMAP
164 default y
165 help
166 Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
167
168config USB_MUSB_LOGLEVEL
169 depends on USB_MUSB_HDRC
170 int 'Logging Level (0 - none / 3 - annoying / ... )'
171 default 0
172 help
173 Set the logging level. 0 disables the debugging altogether,
174 although when USB_DEBUG is set the value is at least 1.
175 Starting at level 3, per-transfer (urb, usb_request, packet,
176 or dma transfer) tracing may kick in.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 000000000000..88eb67de08ae
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,86 @@
1#
2# for USB OTG silicon based on Mentor Graphics INVENTRA designs
3#
4
5musb_hdrc-objs := musb_core.o
6
7obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
8
9ifeq ($(CONFIG_ARCH_DAVINCI),y)
10 musb_hdrc-objs += davinci.o
11endif
12
13ifeq ($(CONFIG_USB_TUSB6010),y)
14 musb_hdrc-objs += tusb6010.o
15endif
16
17ifeq ($(CONFIG_ARCH_OMAP2430),y)
18 musb_hdrc-objs += omap2430.o
19endif
20
21ifeq ($(CONFIG_ARCH_OMAP3430),y)
22 musb_hdrc-objs += omap2430.o
23endif
24
25ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
26 musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
27endif
28
29ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
30 musb_hdrc-objs += musb_virthub.o musb_host.o
31endif
32
33# the kconfig must guarantee that only one of the
34# possible I/O schemes will be enabled at a time ...
35# PIO only, or DMA (several potential schemes).
36# though PIO is always there to back up DMA, and for ep0
37
38ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
39
40 ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
41 musb_hdrc-objs += musbhsdma.o
42
43 else
44 ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
45 musb_hdrc-objs += cppi_dma.o
46
47 else
48 ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
49 musb_hdrc-objs += tusb6010_omap.o
50
51 endif
52 endif
53 endif
54endif
55
56
57################################################################################
58
59# FIXME remove all these extra "-DMUSB_* things, stick to CONFIG_*
60
61ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
62 EXTRA_CFLAGS += -DMUSB_AHB_ID
63endif
64
65# Debugging
66
67MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL)
68
69ifeq ("$(strip $(MUSB_DEBUG))","")
70 ifdef CONFIG_USB_DEBUG
71 MUSB_DEBUG:=1
72 else
73 MUSB_DEBUG:=0
74 endif
75endif
76
77ifneq ($(MUSB_DEBUG),0)
78 EXTRA_CFLAGS += -DDEBUG
79
80 ifeq ($(CONFIG_PROC_FS),y)
81 musb_hdrc-objs += musb_procfs.o
82 endif
83
84endif
85
86EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 000000000000..5ad6d0893cbe
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * This file implements a DMA interface using TI's CPPI DMA.
5 * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
6 * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
7 */
8
9#include <linux/usb.h>
10
11#include "musb_core.h"
12#include "cppi_dma.h"
13
14
15/* CPPI DMA status 7-mar-2006:
16 *
17 * - See musb_{host,gadget}.c for more info
18 *
19 * - Correct RX DMA generally forces the engine into irq-per-packet mode,
20 * which can easily saturate the CPU under non-mass-storage loads.
21 *
22 * NOTES 24-aug-2006 (2.6.18-rc4):
23 *
24 * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
25 * evidently after the 1 byte packet was received and acked, the queue
26 * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
27 * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
28 * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
29 * of its next (512 byte) packet. IRQ issues?
30 *
31 * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
32 * evidently also directly update the RX and TX CSRs ... so audit all
33 * host and peripheral side DMA code to avoid CSR access after DMA has
34 * been started.
35 */
36
37/* REVISIT now we can avoid preallocating these descriptors; or
38 * more simply, switch to a global freelist not per-channel ones.
39 * Note: at full speed, 64 descriptors == 4K bulk data.
40 */
41#define NUM_TXCHAN_BD 64
42#define NUM_RXCHAN_BD 64
43
44static inline void cpu_drain_writebuffer(void)
45{
46 wmb();
47#ifdef CONFIG_CPU_ARM926T
48 /* REVISIT this "should not be needed",
49 * but lack of it sure seemed to hurt ...
50 */
51 asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
52#endif
53}
54
55static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
56{
57 struct cppi_descriptor *bd = c->freelist;
58
59 if (bd)
60 c->freelist = bd->next;
61 return bd;
62}
63
64static inline void
65cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
66{
67 if (!bd)
68 return;
69 bd->next = c->freelist;
70 c->freelist = bd;
71}
72
73/*
74 * Start DMA controller
75 *
76 * Initialize the DMA controller as necessary.
77 */
78
79/* zero out entire rx state RAM entry for the channel */
80static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
81{
82 musb_writel(&rx->rx_skipbytes, 0, 0);
83 musb_writel(&rx->rx_head, 0, 0);
84 musb_writel(&rx->rx_sop, 0, 0);
85 musb_writel(&rx->rx_current, 0, 0);
86 musb_writel(&rx->rx_buf_current, 0, 0);
87 musb_writel(&rx->rx_len_len, 0, 0);
88 musb_writel(&rx->rx_cnt_cnt, 0, 0);
89}
90
91/* zero out entire tx state RAM entry for the channel */
92static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
93{
94 musb_writel(&tx->tx_head, 0, 0);
95 musb_writel(&tx->tx_buf, 0, 0);
96 musb_writel(&tx->tx_current, 0, 0);
97 musb_writel(&tx->tx_buf_current, 0, 0);
98 musb_writel(&tx->tx_info, 0, 0);
99 musb_writel(&tx->tx_rem_len, 0, 0);
100 /* musb_writel(&tx->tx_dummy, 0, 0); */
101 musb_writel(&tx->tx_complete, 0, ptr);
102}
103
104static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
105{
106 int j;
107
108 /* initialize channel fields */
109 c->head = NULL;
110 c->tail = NULL;
111 c->last_processed = NULL;
112 c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
113 c->controller = cppi;
114 c->is_rndis = 0;
115 c->freelist = NULL;
116
117 /* build the BD Free list for the channel */
118 for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
119 struct cppi_descriptor *bd;
120 dma_addr_t dma;
121
122 bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
123 bd->dma = dma;
124 cppi_bd_free(c, bd);
125 }
126}
127
128static int cppi_channel_abort(struct dma_channel *);
129
130static void cppi_pool_free(struct cppi_channel *c)
131{
132 struct cppi *cppi = c->controller;
133 struct cppi_descriptor *bd;
134
135 (void) cppi_channel_abort(&c->channel);
136 c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
137 c->controller = NULL;
138
139 /* free all its bds */
140 bd = c->last_processed;
141 do {
142 if (bd)
143 dma_pool_free(cppi->pool, bd, bd->dma);
144 bd = cppi_bd_alloc(c);
145 } while (bd);
146 c->last_processed = NULL;
147}
148
149static int __init cppi_controller_start(struct dma_controller *c)
150{
151 struct cppi *controller;
152 void __iomem *tibase;
153 int i;
154
155 controller = container_of(c, struct cppi, controller);
156
157 /* do whatever is necessary to start controller */
158 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
159 controller->tx[i].transmit = true;
160 controller->tx[i].index = i;
161 }
162 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
163 controller->rx[i].transmit = false;
164 controller->rx[i].index = i;
165 }
166
167 /* setup BD list on a per channel basis */
168 for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
169 cppi_pool_init(controller, controller->tx + i);
170 for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
171 cppi_pool_init(controller, controller->rx + i);
172
173 tibase = controller->tibase;
174 INIT_LIST_HEAD(&controller->tx_complete);
175
176 /* initialise tx/rx channel head pointers to zero */
177 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
178 struct cppi_channel *tx_ch = controller->tx + i;
179 struct cppi_tx_stateram __iomem *tx;
180
181 INIT_LIST_HEAD(&tx_ch->tx_complete);
182
183 tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
184 tx_ch->state_ram = tx;
185 cppi_reset_tx(tx, 0);
186 }
187 for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
188 struct cppi_channel *rx_ch = controller->rx + i;
189 struct cppi_rx_stateram __iomem *rx;
190
191 INIT_LIST_HEAD(&rx_ch->tx_complete);
192
193 rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
194 rx_ch->state_ram = rx;
195 cppi_reset_rx(rx);
196 }
197
198 /* enable individual cppi channels */
199 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
200 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
201 musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
202 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
203
204 /* enable tx/rx CPPI control */
205 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
206 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
207
208 /* disable RNDIS mode, also host rx RNDIS autorequest */
209 musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
210 musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
211
212 return 0;
213}
214
215/*
216 * Stop DMA controller
217 *
218 * De-Init the DMA controller as necessary.
219 */
220
221static int cppi_controller_stop(struct dma_controller *c)
222{
223 struct cppi *controller;
224 void __iomem *tibase;
225 int i;
226
227 controller = container_of(c, struct cppi, controller);
228
229 tibase = controller->tibase;
230 /* DISABLE INDIVIDUAL CHANNEL Interrupts */
231 musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
232 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
233 musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
234 DAVINCI_DMA_ALL_CHANNELS_ENABLE);
235
236 DBG(1, "Tearing down RX and TX Channels\n");
237 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
238 /* FIXME restructure of txdma to use bds like rxdma */
239 controller->tx[i].last_processed = NULL;
240 cppi_pool_free(controller->tx + i);
241 }
242 for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
243 cppi_pool_free(controller->rx + i);
244
245 /* in Tx Case proper teardown is supported. We resort to disabling
246 * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is
247 * complete TX CPPI cannot be disabled.
248 */
249 /*disable tx/rx cppi */
250 musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
251 musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
252
253 return 0;
254}
255
256/* While dma channel is allocated, we only want the core irqs active
257 * for fault reports, otherwise we'd get irqs that we don't care about.
258 * Except for TX irqs, where dma done != fifo empty and reusable ...
259 *
260 * NOTE: docs don't say either way, but irq masking **enables** irqs.
261 *
262 * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
263 */
264static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
265{
266 musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
267}
268
269static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
270{
271 musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
272}
273
274
275/*
276 * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
277 * each transfer direction of a non-control endpoint, so allocating
278 * (and deallocating) is mostly a way to notice bad housekeeping on
279 * the software side. We assume the irqs are always active.
280 */
281static struct dma_channel *
282cppi_channel_allocate(struct dma_controller *c,
283 struct musb_hw_ep *ep, u8 transmit)
284{
285 struct cppi *controller;
286 u8 index;
287 struct cppi_channel *cppi_ch;
288 void __iomem *tibase;
289
290 controller = container_of(c, struct cppi, controller);
291 tibase = controller->tibase;
292
293 /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
294 index = ep->epnum - 1;
295
296 /* return the corresponding CPPI Channel Handle, and
297 * probably disable the non-CPPI irq until we need it.
298 */
299 if (transmit) {
300 if (index >= ARRAY_SIZE(controller->tx)) {
301 DBG(1, "no %cX%d CPPI channel\n", 'T', index);
302 return NULL;
303 }
304 cppi_ch = controller->tx + index;
305 } else {
306 if (index >= ARRAY_SIZE(controller->rx)) {
307 DBG(1, "no %cX%d CPPI channel\n", 'R', index);
308 return NULL;
309 }
310 cppi_ch = controller->rx + index;
311 core_rxirq_disable(tibase, ep->epnum);
312 }
313
314 /* REVISIT make this an error later once the same driver code works
315 * with the other DMA engine too
316 */
317 if (cppi_ch->hw_ep)
318 DBG(1, "re-allocating DMA%d %cX channel %p\n",
319 index, transmit ? 'T' : 'R', cppi_ch);
320 cppi_ch->hw_ep = ep;
321 cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
322
323 DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
324 return &cppi_ch->channel;
325}
326
327/* Release a CPPI Channel. */
328static void cppi_channel_release(struct dma_channel *channel)
329{
330 struct cppi_channel *c;
331 void __iomem *tibase;
332
333 /* REVISIT: for paranoia, check state and abort if needed... */
334
335 c = container_of(channel, struct cppi_channel, channel);
336 tibase = c->controller->tibase;
337 if (!c->hw_ep)
338 DBG(1, "releasing idle DMA channel %p\n", c);
339 else if (!c->transmit)
340 core_rxirq_enable(tibase, c->index + 1);
341
342 /* for now, leave its cppi IRQ enabled (we won't trigger it) */
343 c->hw_ep = NULL;
344 channel->status = MUSB_DMA_STATUS_UNKNOWN;
345}
346
347/* Context: controller irqlocked */
348static void
349cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
350{
351 void __iomem *base = c->controller->mregs;
352 struct cppi_rx_stateram __iomem *rx = c->state_ram;
353
354 musb_ep_select(base, c->index + 1);
355
356 DBG(level, "RX DMA%d%s: %d left, csr %04x, "
357 "%08x H%08x S%08x C%08x, "
358 "B%08x L%08x %08x .. %08x"
359 "\n",
360 c->index, tag,
361 musb_readl(c->controller->tibase,
362 DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
363 musb_readw(c->hw_ep->regs, MUSB_RXCSR),
364
365 musb_readl(&rx->rx_skipbytes, 0),
366 musb_readl(&rx->rx_head, 0),
367 musb_readl(&rx->rx_sop, 0),
368 musb_readl(&rx->rx_current, 0),
369
370 musb_readl(&rx->rx_buf_current, 0),
371 musb_readl(&rx->rx_len_len, 0),
372 musb_readl(&rx->rx_cnt_cnt, 0),
373 musb_readl(&rx->rx_complete, 0)
374 );
375}
376
377/* Context: controller irqlocked */
378static void
379cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
380{
381 void __iomem *base = c->controller->mregs;
382 struct cppi_tx_stateram __iomem *tx = c->state_ram;
383
384 musb_ep_select(base, c->index + 1);
385
386 DBG(level, "TX DMA%d%s: csr %04x, "
387 "H%08x S%08x C%08x %08x, "
388 "F%08x L%08x .. %08x"
389 "\n",
390 c->index, tag,
391 musb_readw(c->hw_ep->regs, MUSB_TXCSR),
392
393 musb_readl(&tx->tx_head, 0),
394 musb_readl(&tx->tx_buf, 0),
395 musb_readl(&tx->tx_current, 0),
396 musb_readl(&tx->tx_buf_current, 0),
397
398 musb_readl(&tx->tx_info, 0),
399 musb_readl(&tx->tx_rem_len, 0),
400 /* dummy/unused word 6 */
401 musb_readl(&tx->tx_complete, 0)
402 );
403}
404
405/* Context: controller irqlocked */
406static inline void
407cppi_rndis_update(struct cppi_channel *c, int is_rx,
408 void __iomem *tibase, int is_rndis)
409{
410 /* we may need to change the rndis flag for this cppi channel */
411 if (c->is_rndis != is_rndis) {
412 u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
413 u32 temp = 1 << (c->index);
414
415 if (is_rx)
416 temp <<= 16;
417 if (is_rndis)
418 value |= temp;
419 else
420 value &= ~temp;
421 musb_writel(tibase, DAVINCI_RNDIS_REG, value);
422 c->is_rndis = is_rndis;
423 }
424}
425
426static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
427{
428 pr_debug("RXBD/%s %08x: "
429 "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
430 tag, bd->dma,
431 bd->hw_next, bd->hw_bufp, bd->hw_off_len,
432 bd->hw_options);
433}
434
435static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
436{
437#if MUSB_DEBUG > 0
438 struct cppi_descriptor *bd;
439
440 if (!_dbg_level(level))
441 return;
442 cppi_dump_rx(level, rx, tag);
443 if (rx->last_processed)
444 cppi_dump_rxbd("last", rx->last_processed);
445 for (bd = rx->head; bd; bd = bd->next)
446 cppi_dump_rxbd("active", bd);
447#endif
448}
449
450
451/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
452 * so we won't ever use it (see "CPPI RX Woes" below).
453 */
454static inline int cppi_autoreq_update(struct cppi_channel *rx,
455 void __iomem *tibase, int onepacket, unsigned n_bds)
456{
457 u32 val;
458
459#ifdef RNDIS_RX_IS_USABLE
460 u32 tmp;
461 /* assert(is_host_active(musb)) */
462
463 /* start from "AutoReq never" */
464 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
465 val = tmp & ~((0x3) << (rx->index * 2));
466
467 /* HCD arranged reqpkt for packet #1. we arrange int
468 * for all but the last one, maybe in two segments.
469 */
470 if (!onepacket) {
471#if 0
472 /* use two segments, autoreq "all" then the last "never" */
473 val |= ((0x3) << (rx->index * 2));
474 n_bds--;
475#else
476 /* one segment, autoreq "all-but-last" */
477 val |= ((0x1) << (rx->index * 2));
478#endif
479 }
480
481 if (val != tmp) {
482 int n = 100;
483
484 /* make sure that autoreq is updated before continuing */
485 musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
486 do {
487 tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
488 if (tmp == val)
489 break;
490 cpu_relax();
491 } while (n-- > 0);
492 }
493#endif
494
495 /* REQPKT is turned off after each segment */
496 if (n_bds && rx->channel.actual_len) {
497 void __iomem *regs = rx->hw_ep->regs;
498
499 val = musb_readw(regs, MUSB_RXCSR);
500 if (!(val & MUSB_RXCSR_H_REQPKT)) {
501 val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
502 musb_writew(regs, MUSB_RXCSR, val);
503 /* flush writebufer */
504 val = musb_readw(regs, MUSB_RXCSR);
505 }
506 }
507 return n_bds;
508}
509
510
511/* Buffer enqueuing Logic:
512 *
513 * - RX builds new queues each time, to help handle routine "early
514 * termination" cases (faults, including errors and short reads)
515 * more correctly.
516 *
517 * - for now, TX reuses the same queue of BDs every time
518 *
519 * REVISIT long term, we want a normal dynamic model.
520 * ... the goal will be to append to the
521 * existing queue, processing completed "dma buffers" (segments) on the fly.
522 *
523 * Otherwise we force an IRQ latency between requests, which slows us a lot
524 * (especially in "transparent" dma). Unfortunately that model seems to be
525 * inherent in the DMA model from the Mentor code, except in the rare case
526 * of transfers big enough (~128+ KB) that we could append "middle" segments
527 * in the TX paths. (RX can't do this, see below.)
528 *
529 * That's true even in the CPPI- friendly iso case, where most urbs have
530 * several small segments provided in a group and where the "packet at a time"
531 * "transparent" DMA model is always correct, even on the RX side.
532 */
533
534/*
535 * CPPI TX:
536 * ========
537 * TX is a lot more reasonable than RX; it doesn't need to run in
538 * irq-per-packet mode very often. RNDIS mode seems to behave too
539 * (except how it handles the exactly-N-packets case). Building a
540 * txdma queue with multiple requests (urb or usb_request) looks
541 * like it would work ... but fault handling would need much testing.
542 *
543 * The main issue with TX mode RNDIS relates to transfer lengths that
544 * are an exact multiple of the packet length. It appears that there's
545 * a hiccup in that case (maybe the DMA completes before the ZLP gets
546 * written?) boiling down to not being able to rely on CPPI writing any
547 * terminating zero length packet before the next transfer is written.
548 * So that's punted to PIO; better yet, gadget drivers can avoid it.
549 *
550 * Plus, there's allegedly an undocumented constraint that rndis transfer
551 * length be a multiple of 64 bytes ... but the chip doesn't act that
552 * way, and we really don't _want_ that behavior anyway.
553 *
554 * On TX, "transparent" mode works ... although experiments have shown
555 * problems trying to use the SOP/EOP bits in different USB packets.
556 *
557 * REVISIT try to handle terminating zero length packets using CPPI
558 * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
559 * links avoid that issue by forcing them to avoid zlps.)
560 */
561static void
562cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
563{
564 unsigned maxpacket = tx->maxpacket;
565 dma_addr_t addr = tx->buf_dma + tx->offset;
566 size_t length = tx->buf_len - tx->offset;
567 struct cppi_descriptor *bd;
568 unsigned n_bds;
569 unsigned i;
570 struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
571 int rndis;
572
573 /* TX can use the CPPI "rndis" mode, where we can probably fit this
574 * transfer in one BD and one IRQ. The only time we would NOT want
575 * to use it is when hardware constraints prevent it, or if we'd
576 * trigger the "send a ZLP?" confusion.
577 */
578 rndis = (maxpacket & 0x3f) == 0
579 && length < 0xffff
580 && (length % maxpacket) != 0;
581
582 if (rndis) {
583 maxpacket = length;
584 n_bds = 1;
585 } else {
586 n_bds = length / maxpacket;
587 if (!length || (length % maxpacket))
588 n_bds++;
589 n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
590 length = min(n_bds * maxpacket, length);
591 }
592
593 DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
594 tx->index,
595 maxpacket,
596 rndis ? "rndis" : "transparent",
597 n_bds,
598 addr, length);
599
600 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
601
602 /* assuming here that channel_program is called during
603 * transfer initiation ... current code maintains state
604 * for one outstanding request only (no queues, not even
605 * the implicit ones of an iso urb).
606 */
607
608 bd = tx->freelist;
609 tx->head = bd;
610 tx->last_processed = NULL;
611
612 /* FIXME use BD pool like RX side does, and just queue
613 * the minimum number for this request.
614 */
615
616 /* Prepare queue of BDs first, then hand it to hardware.
617 * All BDs except maybe the last should be of full packet
618 * size; for RNDIS there _is_ only that last packet.
619 */
620 for (i = 0; i < n_bds; ) {
621 if (++i < n_bds && bd->next)
622 bd->hw_next = bd->next->dma;
623 else
624 bd->hw_next = 0;
625
626 bd->hw_bufp = tx->buf_dma + tx->offset;
627
628 /* FIXME set EOP only on the last packet,
629 * SOP only on the first ... avoid IRQs
630 */
631 if ((tx->offset + maxpacket) <= tx->buf_len) {
632 tx->offset += maxpacket;
633 bd->hw_off_len = maxpacket;
634 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
635 | CPPI_OWN_SET | maxpacket;
636 } else {
637 /* only this one may be a partial USB Packet */
638 u32 partial_len;
639
640 partial_len = tx->buf_len - tx->offset;
641 tx->offset = tx->buf_len;
642 bd->hw_off_len = partial_len;
643
644 bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
645 | CPPI_OWN_SET | partial_len;
646 if (partial_len == 0)
647 bd->hw_options |= CPPI_ZERO_SET;
648 }
649
650 DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
651 bd, bd->hw_next, bd->hw_bufp,
652 bd->hw_off_len, bd->hw_options);
653
654 /* update the last BD enqueued to the list */
655 tx->tail = bd;
656 bd = bd->next;
657 }
658
659 /* BDs live in DMA-coherent memory, but writes might be pending */
660 cpu_drain_writebuffer();
661
662 /* Write to the HeadPtr in state RAM to trigger */
663 musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
664
665 cppi_dump_tx(5, tx, "/S");
666}
667
668/*
669 * CPPI RX Woes:
670 * =============
671 * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
672 * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
673 * (Full speed transfers have similar scenarios.)
674 *
675 * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
676 * and the next packet goes into a buffer that's queued later; while (b) fills
677 * the buffer with 1024 bytes. How to do that with CPPI?
678 *
679 * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
680 * (b) loses **BADLY** because nothing (!) happens when that second packet
681 * fills the buffer, much less when a third one arrives. (Which makes this
682 * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
683 * is optional, and it's fine if peripherals -- not hosts! -- pad messages
684 * out to end-of-buffer. Standard PCI host controller DMA descriptors
685 * implement that mode by default ... which is no accident.)
686 *
687 * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
688 * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
689 * ignores SOP/EOP markings and processes both of those BDs; so both packets
690 * are loaded into the buffer (with a 212 byte gap between them), and the next
691 * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
692 * are intended as outputs for RX queues, not inputs...)
693 *
694 * - A variant of "transparent" mode -- one BD at a time -- is the only way to
695 * reliably make both cases work, with software handling both cases correctly
696 * and at the significant penalty of needing an IRQ per packet. (The lack of
697 * I/O overlap can be slightly ameliorated by enabling double buffering.)
698 *
699 * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
700 * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
701 * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
702 * with guaranteed driver level fault recovery and scrubbing out what's left
703 * of that garbaged datastream.
704 *
705 * But there seems to be no way to identify the cases where CPPI RNDIS mode
706 * is appropriate -- which do NOT include RNDIS host drivers, but do include
707 * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
708 * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
709 * that applies best on the peripheral side (and which could fail rudely).
710 *
711 * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
712 * cases other than mass storage class. Otherwise we're correct but slow,
713 * since CPPI penalizes our need for a "true RNDIS" default mode.
714 */
715
716
717/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
718 *
719 * IFF
720 * (a) peripheral mode ... since rndis peripherals could pad their
721 * writes to hosts, causing i/o failure; or we'd have to cope with
722 * a largely unknowable variety of host side protocol variants
723 * (b) and short reads are NOT errors ... since full reads would
724 * cause those same i/o failures
725 * (c) and read length is
726 * - less than 64KB (max per cppi descriptor)
727 * - not a multiple of 4096 (g_zero default, full reads typical)
728 * - N (>1) packets long, ditto (full reads not EXPECTED)
729 * THEN
730 * try rx rndis mode
731 *
732 * Cost of heuristic failing: RXDMA wedges at the end of transfers that
733 * fill out the whole buffer. Buggy host side usb network drivers could
734 * trigger that, but "in the field" such bugs seem to be all but unknown.
735 *
736 * So this module parameter lets the heuristic be disabled. When using
737 * gadgetfs, the heuristic will probably need to be disabled.
738 */
739static int cppi_rx_rndis = 1;
740
741module_param(cppi_rx_rndis, bool, 0);
742MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
743
744
745/**
746 * cppi_next_rx_segment - dma read for the next chunk of a buffer
747 * @musb: the controller
748 * @rx: dma channel
749 * @onepacket: true unless caller treats short reads as errors, and
750 * performs fault recovery above usbcore.
751 * Context: controller irqlocked
752 *
753 * See above notes about why we can't use multi-BD RX queues except in
754 * rare cases (mass storage class), and can never use the hardware "rndis"
755 * mode (since it's not a "true" RNDIS mode) with complete safety..
756 *
757 * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
758 * code to recover from corrupted datastreams after each short transfer.
759 */
760static void
761cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
762{
763 unsigned maxpacket = rx->maxpacket;
764 dma_addr_t addr = rx->buf_dma + rx->offset;
765 size_t length = rx->buf_len - rx->offset;
766 struct cppi_descriptor *bd, *tail;
767 unsigned n_bds;
768 unsigned i;
769 void __iomem *tibase = musb->ctrl_base;
770 int is_rndis = 0;
771 struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
772
773 if (onepacket) {
774 /* almost every USB driver, host or peripheral side */
775 n_bds = 1;
776
777 /* maybe apply the heuristic above */
778 if (cppi_rx_rndis
779 && is_peripheral_active(musb)
780 && length > maxpacket
781 && (length & ~0xffff) == 0
782 && (length & 0x0fff) != 0
783 && (length & (maxpacket - 1)) == 0) {
784 maxpacket = length;
785 is_rndis = 1;
786 }
787 } else {
788 /* virtually nothing except mass storage class */
789 if (length > 0xffff) {
790 n_bds = 0xffff / maxpacket;
791 length = n_bds * maxpacket;
792 } else {
793 n_bds = length / maxpacket;
794 if (length % maxpacket)
795 n_bds++;
796 }
797 if (n_bds == 1)
798 onepacket = 1;
799 else
800 n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
801 }
802
803 /* In host mode, autorequest logic can generate some IN tokens; it's
804 * tricky since we can't leave REQPKT set in RXCSR after the transfer
805 * finishes. So: multipacket transfers involve two or more segments.
806 * And always at least two IRQs ... RNDIS mode is not an option.
807 */
808 if (is_host_active(musb))
809 n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
810
811 cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
812
813 length = min(n_bds * maxpacket, length);
814
815 DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
816 "dma 0x%x len %u %u/%u\n",
817 rx->index, maxpacket,
818 onepacket
819 ? (is_rndis ? "rndis" : "onepacket")
820 : "multipacket",
821 n_bds,
822 musb_readl(tibase,
823 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
824 & 0xffff,
825 addr, length, rx->channel.actual_len, rx->buf_len);
826
827 /* only queue one segment at a time, since the hardware prevents
828 * correct queue shutdown after unexpected short packets
829 */
830 bd = cppi_bd_alloc(rx);
831 rx->head = bd;
832
833 /* Build BDs for all packets in this segment */
834 for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
835 u32 bd_len;
836
837 if (i) {
838 bd = cppi_bd_alloc(rx);
839 if (!bd)
840 break;
841 tail->next = bd;
842 tail->hw_next = bd->dma;
843 }
844 bd->hw_next = 0;
845
846 /* all but the last packet will be maxpacket size */
847 if (maxpacket < length)
848 bd_len = maxpacket;
849 else
850 bd_len = length;
851
852 bd->hw_bufp = addr;
853 addr += bd_len;
854 rx->offset += bd_len;
855
856 bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
857 bd->buflen = bd_len;
858
859 bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
860 length -= bd_len;
861 }
862
863 /* we always expect at least one reusable BD! */
864 if (!tail) {
865 WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
866 return;
867 } else if (i < n_bds)
868 WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
869
870 tail->next = NULL;
871 tail->hw_next = 0;
872
873 bd = rx->head;
874 rx->tail = tail;
875
876 /* short reads and other faults should terminate this entire
877 * dma segment. we want one "dma packet" per dma segment, not
878 * one per USB packet, terminating the whole queue at once...
879 * NOTE that current hardware seems to ignore SOP and EOP.
880 */
881 bd->hw_options |= CPPI_SOP_SET;
882 tail->hw_options |= CPPI_EOP_SET;
883
884 if (debug >= 5) {
885 struct cppi_descriptor *d;
886
887 for (d = rx->head; d; d = d->next)
888 cppi_dump_rxbd("S", d);
889 }
890
891 /* in case the preceding transfer left some state... */
892 tail = rx->last_processed;
893 if (tail) {
894 tail->next = bd;
895 tail->hw_next = bd->dma;
896 }
897
898 core_rxirq_enable(tibase, rx->index + 1);
899
900 /* BDs live in DMA-coherent memory, but writes might be pending */
901 cpu_drain_writebuffer();
902
903 /* REVISIT specs say to write this AFTER the BUFCNT register
904 * below ... but that loses badly.
905 */
906 musb_writel(&rx_ram->rx_head, 0, bd->dma);
907
908 /* bufferCount must be at least 3, and zeroes on completion
909 * unless it underflows below zero, or stops at two, or keeps
910 * growing ... grr.
911 */
912 i = musb_readl(tibase,
913 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
914 & 0xffff;
915
916 if (!i)
917 musb_writel(tibase,
918 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
919 n_bds + 2);
920 else if (n_bds > (i - 3))
921 musb_writel(tibase,
922 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
923 n_bds - (i - 3));
924
925 i = musb_readl(tibase,
926 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
927 & 0xffff;
928 if (i < (2 + n_bds)) {
929 DBG(2, "bufcnt%d underrun - %d (for %d)\n",
930 rx->index, i, n_bds);
931 musb_writel(tibase,
932 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
933 n_bds + 2);
934 }
935
936 cppi_dump_rx(4, rx, "/S");
937}
938
939/**
940 * cppi_channel_program - program channel for data transfer
941 * @ch: the channel
942 * @maxpacket: max packet size
943 * @mode: For RX, 1 unless the usb protocol driver promised to treat
944 * all short reads as errors and kick in high level fault recovery.
945 * For TX, ignored because of RNDIS mode races/glitches.
946 * @dma_addr: dma address of buffer
947 * @len: length of buffer
948 * Context: controller irqlocked
949 */
950static int cppi_channel_program(struct dma_channel *ch,
951 u16 maxpacket, u8 mode,
952 dma_addr_t dma_addr, u32 len)
953{
954 struct cppi_channel *cppi_ch;
955 struct cppi *controller;
956 struct musb *musb;
957
958 cppi_ch = container_of(ch, struct cppi_channel, channel);
959 controller = cppi_ch->controller;
960 musb = controller->musb;
961
962 switch (ch->status) {
963 case MUSB_DMA_STATUS_BUS_ABORT:
964 case MUSB_DMA_STATUS_CORE_ABORT:
965 /* fault irq handler should have handled cleanup */
966 WARNING("%cX DMA%d not cleaned up after abort!\n",
967 cppi_ch->transmit ? 'T' : 'R',
968 cppi_ch->index);
969 /* WARN_ON(1); */
970 break;
971 case MUSB_DMA_STATUS_BUSY:
972 WARNING("program active channel? %cX DMA%d\n",
973 cppi_ch->transmit ? 'T' : 'R',
974 cppi_ch->index);
975 /* WARN_ON(1); */
976 break;
977 case MUSB_DMA_STATUS_UNKNOWN:
978 DBG(1, "%cX DMA%d not allocated!\n",
979 cppi_ch->transmit ? 'T' : 'R',
980 cppi_ch->index);
981 /* FALLTHROUGH */
982 case MUSB_DMA_STATUS_FREE:
983 break;
984 }
985
986 ch->status = MUSB_DMA_STATUS_BUSY;
987
988 /* set transfer parameters, then queue up its first segment */
989 cppi_ch->buf_dma = dma_addr;
990 cppi_ch->offset = 0;
991 cppi_ch->maxpacket = maxpacket;
992 cppi_ch->buf_len = len;
993
994 /* TX channel? or RX? */
995 if (cppi_ch->transmit)
996 cppi_next_tx_segment(musb, cppi_ch);
997 else
998 cppi_next_rx_segment(musb, cppi_ch, mode);
999
1000 return true;
1001}
1002
/* cppi_rx_scan - scan an RX channel's descriptor queue for completions
 * @cppi: the DMA controller
 * @ch: zero-based RX channel index
 *
 * Walks queued BDs starting just after last_processed (or at the list
 * head), accumulating received lengths into channel.actual_len, acking
 * completed BDs through the rx_complete state-ram register, and
 * releasing them with cppi_bd_free().  Returns true when the whole DMA
 * segment finished (EOQ flag, short packet, or end of BD list).
 *
 * Context: controller irqlocked (called from cppi_completion and
 * cppi_channel_abort).
 */
static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
{
	struct cppi_channel *rx = &cppi->rx[ch];
	struct cppi_rx_stateram __iomem *state = rx->state_ram;
	struct cppi_descriptor *bd;
	struct cppi_descriptor *last = rx->last_processed;
	bool completed = false;
	bool acked = false;
	int i;
	dma_addr_t safe2ack;
	void __iomem *regs = rx->hw_ep->regs;

	cppi_dump_rx(6, rx, "/K");

	/* resume after the last BD we already handled, if any */
	bd = last ? last->next : rx->head;
	if (!bd)
		return false;

	/* run through all completed BDs */
	for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
			(safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
			i++, bd = bd->next) {
		u16 len;

		/* catch latest BD writes from CPPI */
		rmb();
		if (!completed && (bd->hw_options & CPPI_OWN_SET))
			break;

		DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
			"off.len %08x opt.len %08x (%d)\n",
			bd->dma, bd->hw_next, bd->hw_bufp,
			bd->hw_off_len, bd->hw_options,
			rx->channel.actual_len);

		/* actual packet received length */
		if ((bd->hw_options & CPPI_SOP_SET) && !completed)
			len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
		else
			len = 0;

		if (bd->hw_options & CPPI_EOQ_MASK)
			completed = true;

		/* a filled-short BD terminates the segment */
		if (!completed && len < bd->buflen) {
			/* NOTE: when we get a short packet, RXCSR_H_REQPKT
			 * must have been cleared, and no more DMA packets may
			 * active be in the queue... TI docs didn't say, but
			 * CPPI ignores those BDs even though OWN is still set.
			 */
			completed = true;
			DBG(3, "rx short %d/%d (%d)\n",
					len, bd->buflen,
					rx->channel.actual_len);
		}

		/* If we got here, we expect to ack at least one BD; meanwhile
		 * CPPI may completing other BDs while we scan this list...
		 *
		 * RACE: we can notice OWN cleared before CPPI raises the
		 * matching irq by writing that BD as the completion pointer.
		 * In such cases, stop scanning and wait for the irq, avoiding
		 * lost acks and states where BD ownership is unclear.
		 */
		if (bd->dma == safe2ack) {
			musb_writel(&state->rx_complete, 0, safe2ack);
			safe2ack = musb_readl(&state->rx_complete, 0);
			acked = true;
			/* completion pointer did not advance: stop acking */
			if (bd->dma == safe2ack)
				safe2ack = 0;
		}

		rx->channel.actual_len += len;

		/* free the BD handled on the PREVIOUS iteration; the
		 * current one becomes the new last_processed candidate
		 */
		cppi_bd_free(rx, last);
		last = bd;

		/* stop scanning on end-of-segment */
		if (bd->hw_next == 0)
			completed = true;
	}
	rx->last_processed = last;

	/* dma abort, lost ack, or ... */
	if (!acked && last) {
		int csr;

		if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
			musb_writel(&state->rx_complete, 0, safe2ack);
		if (safe2ack == 0) {
			cppi_bd_free(rx, last);
			rx->last_processed = NULL;

			/* if we land here on the host side, H_REQPKT will
			 * be clear and we need to restart the queue...
			 */
			WARN_ON(rx->head);
		}
		musb_ep_select(cppi->mregs, rx->index + 1);
		csr = musb_readw(regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_DMAENAB) {
			DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
				rx->index,
				rx->head, rx->tail,
				rx->last_processed
					? rx->last_processed->dma
					: 0,
				completed ? ", completed" : "",
				csr);
			cppi_dump_rxq(4, "/what?", rx);
		}
	}
	if (!completed) {
		int csr;

		/* segment still in flight; remember where to resume */
		rx->head = bd;

		/* REVISIT seems like "autoreq all but EOP" doesn't...
		 * setting it here "should" be racey, but seems to work
		 */
		csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		if (is_host_active(cppi->musb)
				&& bd
				&& !(csr & MUSB_RXCSR_H_REQPKT)) {
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(regs, MUSB_RXCSR,
					MUSB_RXCSR_H_WZC_BITS | csr);
			csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
		}
	} else {
		rx->head = NULL;
		rx->tail = NULL;
	}

	cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
	return completed;
}
1140
/* cppi_completion - dispatch pending CPPI DMA completions
 * @musb: the controller instance
 * @rx: bitmask of RX channels with pending completions (bit N == channel N)
 * @tx: bitmask of TX channels with pending completions
 *
 * Irq-handling hook, called from the platform interrupt handler (see
 * davinci_interrupt) with the controller lock held.  Scans completed
 * TX BDs inline and delegates RX scanning to cppi_rx_scan(); ends by
 * writing the CPPI EOI register so the irq can be re-raised.
 */
void cppi_completion(struct musb *musb, u32 rx, u32 tx)
{
	void __iomem *tibase;
	int i, index;
	struct cppi *cppi;
	struct musb_hw_ep *hw_ep = NULL;

	cppi = container_of(musb->dma_controller, struct cppi, controller);

	tibase = musb->ctrl_base;

	/* process TX channels */
	for (index = 0; tx; tx = tx >> 1, index++) {
		struct cppi_channel *tx_ch;
		struct cppi_tx_stateram __iomem *tx_ram;
		bool completed = false;
		struct cppi_descriptor *bd;

		if (!(tx & 1))
			continue;

		tx_ch = cppi->tx + index;
		tx_ram = tx_ch->state_ram;

		/* FIXME need a cppi_tx_scan() routine, which
		 * can also be called from abort code
		 */

		cppi_dump_tx(5, tx_ch, "/E");

		bd = tx_ch->head;

		if (NULL == bd) {
			DBG(1, "null BD\n");
			continue;
		}

		/* run through all completed BDs */
		for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
				i++, bd = bd->next) {
			u16 len;

			/* catch latest BD writes from CPPI */
			rmb();
			/* still owned by the DMA engine: stop here */
			if (bd->hw_options & CPPI_OWN_SET)
				break;

			DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
					bd, bd->hw_next, bd->hw_bufp,
					bd->hw_off_len, bd->hw_options);

			len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
			tx_ch->channel.actual_len += len;

			tx_ch->last_processed = bd;

			/* write completion register to acknowledge
			 * processing of completed BDs, and possibly
			 * release the IRQ; EOQ might not be set ...
			 *
			 * REVISIT use the same ack strategy as rx
			 *
			 * REVISIT have observed bit 18 set; huh??
			 */
			/* if ((bd->hw_options & CPPI_EOQ_MASK)) */
			musb_writel(&tx_ram->tx_complete, 0, bd->dma);

			/* stop scanning on end-of-segment */
			if (bd->hw_next == 0)
				completed = true;
		}

		/* on end of segment, maybe go to next one */
		if (completed) {
			/* cppi_dump_tx(4, tx_ch, "/complete"); */

			/* transfer more, or report completion */
			if (tx_ch->offset >= tx_ch->buf_len) {
				tx_ch->head = NULL;
				tx_ch->tail = NULL;
				tx_ch->channel.status = MUSB_DMA_STATUS_FREE;

				hw_ep = tx_ch->hw_ep;

				/* Peripheral role never repurposes the
				 * endpoint, so immediate completion is
				 * safe.  Host role waits for the fifo
				 * to empty (TXPKTRDY irq) before going
				 * to the next queued bulk transfer.
				 */
				if (is_host_active(cppi->musb)) {
#if 0
					/* WORKAROUND because we may
					 * not always get TXKPTRDY ...
					 */
					int csr;

					csr = musb_readw(hw_ep->regs,
						MUSB_TXCSR);
					if (csr & MUSB_TXCSR_TXPKTRDY)
#endif
						completed = false;
				}
				if (completed)
					musb_dma_completion(musb, index + 1, 1);

			} else {
				/* Bigger transfer than we could fit in
				 * that first batch of descriptors...
				 */
				cppi_next_tx_segment(musb, tx_ch);
			}
		} else
			tx_ch->head = bd;
	}

	/* Start processing the RX block */
	for (index = 0; rx; rx = rx >> 1, index++) {

		if (rx & 1) {
			struct cppi_channel *rx_ch;

			rx_ch = cppi->rx + index;

			/* let incomplete dma segments finish */
			if (!cppi_rx_scan(cppi, index))
				continue;

			/* start another dma segment if needed */
			if (rx_ch->channel.actual_len != rx_ch->buf_len
					&& rx_ch->channel.actual_len
						== rx_ch->offset) {
				cppi_next_rx_segment(musb, rx_ch, 1);
				continue;
			}

			/* all segments completed! */
			rx_ch->channel.status = MUSB_DMA_STATUS_FREE;

			hw_ep = rx_ch->hw_ep;

			core_rxirq_disable(tibase, index + 1);
			musb_dma_completion(musb, index + 1, 0);
		}
	}

	/* write to CPPI EOI register to re-enable interrupts */
	musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
}
1290
1291/* Instantiate a software object representing a DMA controller. */
1292struct dma_controller *__init
1293dma_controller_create(struct musb *musb, void __iomem *mregs)
1294{
1295 struct cppi *controller;
1296
1297 controller = kzalloc(sizeof *controller, GFP_KERNEL);
1298 if (!controller)
1299 return NULL;
1300
1301 controller->mregs = mregs;
1302 controller->tibase = mregs - DAVINCI_BASE_OFFSET;
1303
1304 controller->musb = musb;
1305 controller->controller.start = cppi_controller_start;
1306 controller->controller.stop = cppi_controller_stop;
1307 controller->controller.channel_alloc = cppi_channel_allocate;
1308 controller->controller.channel_release = cppi_channel_release;
1309 controller->controller.channel_program = cppi_channel_program;
1310 controller->controller.channel_abort = cppi_channel_abort;
1311
1312 /* NOTE: allocating from on-chip SRAM would give the least
1313 * contention for memory access, if that ever matters here.
1314 */
1315
1316 /* setup BufferPool */
1317 controller->pool = dma_pool_create("cppi",
1318 controller->musb->controller,
1319 sizeof(struct cppi_descriptor),
1320 CPPI_DESCRIPTOR_ALIGN, 0);
1321 if (!controller->pool) {
1322 kfree(controller);
1323 return NULL;
1324 }
1325
1326 return &controller->controller;
1327}
1328
1329/*
1330 * Destroy a previously-instantiated DMA controller.
1331 */
1332void dma_controller_destroy(struct dma_controller *c)
1333{
1334 struct cppi *cppi;
1335
1336 cppi = container_of(c, struct cppi, controller);
1337
1338 /* assert: caller stopped the controller first */
1339 dma_pool_destroy(cppi->pool);
1340
1341 kfree(cppi);
1342}
1343
/*
 * cppi_channel_abort - stop a channel and reclaim its descriptors
 *
 * For TX, runs the CPPI teardown handshake and flushes the endpoint
 * FIFO; for RX, quiesces the endpoint, scans/acks whatever completed,
 * and resets the channel state RAM.  Either way the BD queue is freed
 * and the channel returns to MUSB_DMA_STATUS_FREE.
 *
 * Context: controller irqlocked, endpoint selected
 */
static int cppi_channel_abort(struct dma_channel *channel)
{
	struct cppi_channel *cppi_ch;
	struct cppi *controller;
	void __iomem *mbase;
	void __iomem *tibase;
	void __iomem *regs;
	u32 value;
	struct cppi_descriptor *queue;

	cppi_ch = container_of(channel, struct cppi_channel, channel);

	controller = cppi_ch->controller;

	switch (channel->status) {
	case MUSB_DMA_STATUS_BUS_ABORT:
	case MUSB_DMA_STATUS_CORE_ABORT:
		/* from RX or TX fault irq handler */
	case MUSB_DMA_STATUS_BUSY:
		/* the hardware needs shutting down */
		regs = cppi_ch->hw_ep->regs;
		break;
	case MUSB_DMA_STATUS_UNKNOWN:
	case MUSB_DMA_STATUS_FREE:
		/* nothing in flight; nothing to do */
		return 0;
	default:
		return -EINVAL;
	}

	if (!cppi_ch->transmit && cppi_ch->head)
		cppi_dump_rxq(3, "/abort", cppi_ch);

	mbase = controller->mregs;
	tibase = controller->tibase;

	/* detach the BD queue before touching hardware; freed at the end */
	queue = cppi_ch->head;
	cppi_ch->head = NULL;
	cppi_ch->tail = NULL;

	/* REVISIT should rely on caller having done this,
	 * and caller should rely on us not changing it.
	 * peripheral code is safe ... check host too.
	 */
	musb_ep_select(mbase, cppi_ch->index + 1);

	if (cppi_ch->transmit) {
		struct cppi_tx_stateram __iomem *tx_ram;
		int enabled;

		/* mask interrupts raised to signal teardown complete. */
		enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
				& (1 << cppi_ch->index);
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
					(1 << cppi_ch->index));

		/* REVISIT put timeouts on these controller handshakes */

		cppi_dump_tx(6, cppi_ch, " (teardown)");

		/* teardown DMA engine then usb core */
		do {
			value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
		} while (!(value & CPPI_TEAR_READY));
		musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);

		/* wait for the engine to report teardown via tx_complete */
		tx_ram = cppi_ch->state_ram;
		do {
			value = musb_readl(&tx_ram->tx_complete, 0);
		} while (0xFFFFFFFC != value);
		musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);

		/* FIXME clean up the transfer state ... here?
		 * the completion routine should get called with
		 * an appropriate status code.
		 */

		value = musb_readw(regs, MUSB_TXCSR);
		value &= ~MUSB_TXCSR_DMAENAB;
		value |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(regs, MUSB_TXCSR, value);
		/* NOTE(review): written twice, presumably to flush both
		 * halves of a double-buffered FIFO -- confirm vs TRM
		 */
		musb_writew(regs, MUSB_TXCSR, value);

		/* re-enable interrupt */
		if (enabled)
			musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
					(1 << cppi_ch->index));

		/* While we scrub the TX state RAM, ensure that we clean
		 * up any interrupt that's currently asserted:
		 * 1. Write to completion Ptr value 0x1(bit 0 set)
		 *    (write back mode)
		 * 2. Write to completion Ptr value 0x0(bit 0 cleared)
		 *    (compare mode)
		 * Value written is compared(for bits 31:2) and when
		 * equal, interrupt is deasserted.
		 */
		cppi_reset_tx(tx_ram, 1);
		musb_writel(&tx_ram->tx_complete, 0, 0);

		cppi_dump_tx(5, cppi_ch, " (done teardown)");

		/* REVISIT tx side _should_ clean up the same way
		 * as the RX side ... this does no cleanup at all!
		 */

	} else /* RX */ {
		u16 csr;

		/* NOTE: docs don't guarantee any of this works ... we
		 * expect that if the usb core stops telling the cppi core
		 * to pull more data from it, then it'll be safe to flush
		 * current RX DMA state iff any pending fifo transfer is done.
		 */

		core_rxirq_disable(tibase, cppi_ch->index + 1);

		/* for host, ensure ReqPkt is never set again */
		if (is_host_active(cppi_ch->controller->musb)) {
			value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
			value &= ~((0x3) << (cppi_ch->index * 2));
			musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
		}

		csr = musb_readw(regs, MUSB_RXCSR);

		/* for host, clear (just) ReqPkt at end of current packet(s) */
		if (is_host_active(cppi_ch->controller->musb)) {
			csr |= MUSB_RXCSR_H_WZC_BITS;
			csr &= ~MUSB_RXCSR_H_REQPKT;
		} else
			csr |= MUSB_RXCSR_P_WZC_BITS;

		/* clear dma enable */
		csr &= ~(MUSB_RXCSR_DMAENAB);
		musb_writew(regs, MUSB_RXCSR, csr);
		csr = musb_readw(regs, MUSB_RXCSR);

		/* Quiesce: wait for current dma to finish (if not cleanup).
		 * We can't use bit zero of stateram->rx_sop, since that
		 * refers to an entire "DMA packet" not just emptying the
		 * current fifo.  Most segments need multiple usb packets.
		 */
		if (channel->status == MUSB_DMA_STATUS_BUSY)
			udelay(50);

		/* scan the current list, reporting any data that was
		 * transferred and acking any IRQ
		 */
		cppi_rx_scan(controller, cppi_ch->index);

		/* clobber the existing state once it's idle
		 *
		 * NOTE: arguably, we should also wait for all the other
		 * RX channels to quiesce (how??) and then temporarily
		 * disable RXCPPI_CTRL_REG ... but it seems that we can
		 * rely on the controller restarting from state ram, with
		 * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
		 * correct itself after the next DMA transfer though.
		 *
		 * REVISIT does using rndis mode change that?
		 */
		cppi_reset_rx(cppi_ch->state_ram);

		/* next DMA request _should_ load cppi head ptr */

		/* ... we don't "free" that list, only mutate it in place. */
		cppi_dump_rx(5, cppi_ch, " (done abort)");

		/* clean up previously pending bds */
		cppi_bd_free(cppi_ch, cppi_ch->last_processed);
		cppi_ch->last_processed = NULL;

		while (queue) {
			struct cppi_descriptor *tmp = queue->next;

			cppi_bd_free(cppi_ch, queue);
			queue = tmp;
		}
	}

	/* channel is idle again; scrub the bookkeeping */
	channel->status = MUSB_DMA_STATUS_FREE;
	cppi_ch->buf_dma = 0;
	cppi_ch->offset = 0;
	cppi_ch->buf_len = 0;
	cppi_ch->maxpacket = 0;
	return 0;
}
1535
1536/* TBD Queries:
1537 *
1538 * Power Management ... probably turn off cppi during suspend, restart;
1539 * check state ram? Clocking is presumably shared with usb core.
1540 */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 000000000000..fc5216b5d2c5
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
1/* Copyright (C) 2005-2006 by Texas Instruments */
2
3#ifndef _CPPI_DMA_H_
4#define _CPPI_DMA_H_
5
6#include <linux/slab.h>
7#include <linux/list.h>
8#include <linux/smp_lock.h>
9#include <linux/errno.h>
10#include <linux/dmapool.h>
11
12#include "musb_dma.h"
13#include "musb_core.h"
14
15
16/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
17 * would seem to be shared with the TUSB6020 (over VLYNQ).
18 */
19
20#include "davinci.h"
21
22
23/* CPPI RX/TX state RAM */
24
/* Per-channel TX state RAM, in hardware layout; words are accessed
 * through musb_readl/musb_writel in the driver.
 */
struct cppi_tx_stateram {
	u32 tx_head;			/* "DMA packet" head descriptor */
	u32 tx_buf;
	u32 tx_current;			/* current descriptor */
	u32 tx_buf_current;
	u32 tx_info;			/* flags, remaining buflen */
	u32 tx_rem_len;
	u32 tx_dummy;			/* unused */
	u32 tx_complete;		/* completion ptr; BD ack handshake */
};
35
/* Per-channel RX state RAM, in hardware layout; words are accessed
 * through musb_readl/musb_writel in the driver.
 */
struct cppi_rx_stateram {
	u32 rx_skipbytes;
	u32 rx_head;
	u32 rx_sop;			/* "DMA packet" head descriptor */
	u32 rx_current;			/* current descriptor */
	u32 rx_buf_current;
	u32 rx_len_len;
	u32 rx_cnt_cnt;
	u32 rx_complete;		/* completion ptr; BD ack handshake */
};
46
47/* hw_options bits in CPPI buffer descriptors */
48#define CPPI_SOP_SET ((u32)(1 << 31))
49#define CPPI_EOP_SET ((u32)(1 << 30))
50#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
51#define CPPI_EOQ_MASK ((u32)(1 << 28))
52#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
53#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
54
55#define CPPI_RECV_PKTLEN_MASK 0xFFFF
56#define CPPI_BUFFER_LEN_MASK 0xFFFF
57
58#define CPPI_TEAR_READY ((u32)(1 << 31))
59
60/* CPPI data structure definitions */
61
62#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
63
/* CPPI buffer descriptor.  The first four u32 words are the hardware
 * overlay read/written by the DMA engine; the remaining fields are
 * driver bookkeeping.  Descriptors are allocated from the "cppi"
 * dma_pool (DMA-coherent memory).
 */
struct cppi_descriptor {
	/* hardware overlay */
	u32		hw_next;	/* next buffer descriptor Pointer */
	u32		hw_bufp;	/* i/o buffer pointer */
	u32		hw_off_len;	/* buffer_offset16, buffer_length16 */
	u32		hw_options;	/* flags:  SOP, EOP etc */

	struct cppi_descriptor *next;	/* driver-side queue link */
	dma_addr_t	dma;		/* address of this descriptor */
	u32		buflen;		/* for RX: original buffer length */
} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
75
76
77struct cppi;
78
79/* CPPI Channel Control structure */
/* CPPI Channel Control structure: one per direction per endpoint,
 * wrapping the generic struct dma_channel handed to the MUSB core.
 */
struct cppi_channel {
	struct dma_channel	channel;

	/* back pointer to the DMA controller structure */
	struct cppi		*controller;

	/* which direction of which endpoint? */
	struct musb_hw_ep	*hw_ep;
	bool			transmit;	/* true for TX channels */
	u8			index;		/* zero-based channel number */

	/* DMA modes:  RNDIS or "transparent" */
	u8			is_rndis;

	/* book keeping for current transfer request */
	dma_addr_t		buf_dma;	/* transfer buffer base */
	u32			buf_len;	/* total transfer length */
	u32			maxpacket;
	u32			offset;		/* dma requested */

	void __iomem		*state_ram;	/* CPPI state */

	/* BDs not currently queued to hardware */
	struct cppi_descriptor	*freelist;

	/* BD management fields */
	struct cppi_descriptor	*head;
	struct cppi_descriptor	*tail;
	struct cppi_descriptor	*last_processed;

	/* use tx_complete in host role to track endpoints waiting for
	 * FIFONOTEMPTY to clear.
	 */
	struct list_head	tx_complete;
};
114
115/* CPPI DMA controller object */
/* CPPI DMA controller object, embedding the generic dma_controller
 * returned by dma_controller_create().
 */
struct cppi {
	struct dma_controller		controller;
	struct musb			*musb;
	void __iomem			*mregs;		/* Mentor regs */
	void __iomem			*tibase;	/* TI/CPPI regs */

	/* channel 0 maps to endpoint 1, etc. (EP0 never uses DMA) */
	struct cppi_channel		tx[MUSB_C_NUM_EPT - 1];
	struct cppi_channel		rx[MUSB_C_NUM_EPR - 1];

	/* pool of DMA-coherent cppi_descriptor buffers */
	struct dma_pool			*pool;

	struct list_head		tx_complete;
};
129
130/* irq handling hook */
131extern void cppi_completion(struct musb *, u32 rx, u32 tx);
132
133#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 000000000000..75baf181a8cd
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * This file is part of the Inventra Controller Driver for Linux.
5 *
6 * The Inventra Controller Driver for Linux is free software; you
7 * can redistribute it and/or modify it under the terms of the GNU
8 * General Public License version 2 as published by the Free Software
9 * Foundation.
10 *
11 * The Inventra Controller Driver for Linux is distributed in
12 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
13 * without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 * License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with The Inventra Controller Driver for Linux ; if not,
19 * write to the Free Software Foundation, Inc., 59 Temple Place,
20 * Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <linux/init.h>
29#include <linux/list.h>
30#include <linux/delay.h>
31#include <linux/clk.h>
32#include <linux/io.h>
33
34#include <asm/arch/hardware.h>
35#include <asm/arch/memory.h>
36#include <asm/arch/gpio.h>
37#include <asm/mach-types.h>
38
39#include "musb_core.h"
40
41#ifdef CONFIG_MACH_DAVINCI_EVM
42#include <asm/arch/i2c-client.h>
43#endif
44
45#include "davinci.h"
46#include "cppi_dma.h"
47
48
49/* REVISIT (PM) we should be able to keep the PHY in low power mode most
50 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
51 * and, when in host mode, autosuspending idle root ports... PHYPLLON
52 * (overriding SUSPENDM?) then likely needs to stay off.
53 */
54
/* Power up the on-chip USB PHY and busy-wait until its clock is good. */
static inline void phy_on(void)
{
	/* start the on-chip PHY and its PLL */
	__raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
			(void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
	/* spin until PHYCLKGD reports a stable clock */
	while ((__raw_readl((void __force __iomem *)
				IO_ADDRESS(USBPHY_CTL_PADDR))
			& USBPHY_PHYCLKGD) == 0)
		cpu_relax();
}
65
/* Power down the on-chip USB PHY; counterpart of phy_on(). */
static inline void phy_off(void)
{
	/* powerdown the on-chip PHY and its oscillator */
	__raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
			IO_ADDRESS(USBPHY_CTL_PADDR));
}
72
73static int dma_off = 1;
74
/* Enable platform-level USB interrupts for the endpoints in
 * musb->epmask, plus all core USB interrupts except SOF.
 */
void musb_platform_enable(struct musb *musb)
{
	u32 tmp, old, val;

	/* workaround: setup irqs through both register sets */
	tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
			<< DAVINCI_USB_TXINT_SHIFT;
	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
	old = tmp;
	/* RX endpoint 0 has no RX interrupt bit (mask with 0xfffe) */
	tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
			<< DAVINCI_USB_RXINT_SHIFT;
	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
	tmp |= old;

	/* unmask every core USB interrupt except SOF */
	val = ~MUSB_INTR_SOF;
	tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);

	if (is_dma_capable() && !dma_off)
		printk(KERN_WARNING "%s %s: dma not reactivated\n",
				__FILE__, __func__);
	else
		dma_off = 0;

	/* force a DRVVBUS irq so we can start polling for ID change */
	if (is_otg_enabled(musb))
		musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
			DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
}
104
105/*
106 * Disable the HDRC and flush interrupts
107 */
/* Mask all platform-level USB interrupts, clear DEVCTL, and write EOI
 * so no stale interrupt remains asserted.
 */
void musb_platform_disable(struct musb *musb)
{
	/* because we don't set CTRLR.UINT, "important" to:
	 *  - not read/write INTRUSB/INTRUSBE
	 *  - (except during initial setup, as workaround)
	 *  - use INTSETR/INTCLRR instead
	 */
	musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
			DAVINCI_USB_USBINT_MASK
			| DAVINCI_USB_TXINT_MASK
			| DAVINCI_USB_RXINT_MASK);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);

	if (is_dma_capable() && !dma_off)
		WARNING("dma still active\n");
}
125
126
127/* REVISIT it's not clear whether DaVinci can support full OTG. */
128
129static int vbus_state = -1;
130
131#ifdef CONFIG_USB_MUSB_HDRC_HCD
132#define portstate(stmt) stmt
133#else
134#define portstate(stmt)
135#endif
136
137
138/* VBUS SWITCHING IS BOARD-SPECIFIC */
139
140#ifdef CONFIG_MACH_DAVINCI_EVM
141#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
142
143/* I2C operations are always synchronous, and require a task context.
144 * With unloaded systems, using the shared workqueue seems to suffice
145 * to satisfy the 100msec A_WAIT_VRISE timeout...
146 */
static void evm_deferred_drvvbus(struct work_struct *ignored)
{
	/* At this point vbus_state holds the INVERTED target state (set
	 * by davinci_source_power before scheduling us), matching the
	 * !is_on value the immediate path passes; after the expander op
	 * we record the final on/off state.
	 */
	davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
	vbus_state = !vbus_state;
}
static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
153
154#endif /* modified board */
155#endif /* EVM */
156
/* Switch board VBUS supply on or off.
 * @is_on: nonzero to drive VBUS
 * @immediate: nonzero when the switch must happen synchronously;
 *	otherwise I2C-based boards defer it to evm_vbus_work, which
 *	updates vbus_state itself when done.
 */
static void davinci_source_power(struct musb *musb, int is_on, int immediate)
{
	/* normalize to 0/1 */
	if (is_on)
		is_on = 1;

	if (vbus_state == is_on)
		return;
	vbus_state = !is_on;		/* 0/1 vs "-1 == unknown/init" */

#ifdef CONFIG_MACH_DAVINCI_EVM
	if (machine_is_davinci_evm()) {
#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
		/* modified EVM board switching VBUS with GPIO(6) not I2C
		 * NOTE:  PINMUX0.RGB888 (bit23) must be clear
		 */
		if (is_on)
			gpio_set(GPIO(6));
		else
			gpio_clear(GPIO(6));
		immediate = 1;
#else
		if (immediate)
			davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
		else
			schedule_work(&evm_vbus_work);
#endif
	}
#endif
	/* deferred path leaves vbus_state for the work item to finish */
	if (immediate)
		vbus_state = is_on;
}
188
/* OTG hook: request a (deferred, non-immediate) VBUS switch. */
static void davinci_set_vbus(struct musb *musb, int is_on)
{
	/* only the A-device may drive VBUS */
	WARN_ON(is_on && is_peripheral_active(musb));
	davinci_source_power(musb, is_on, 0);
}
194
195
196#define POLL_SECONDS 2
197
198static struct timer_list otg_workaround;
199
/* Timer callback working around DaVinci's missing OTG status-change
 * interrupts: polls DEVCTL every POLL_SECONDS and nudges the OTG state
 * machine for the A_WAIT_VFALL and B_IDLE states.
 */
static void otg_timer(unsigned long _musb)
{
	struct musb		*musb = (void *)_musb;
	void __iomem		*mregs = musb->mregs;
	u8			devctl;
	unsigned long		flags;

	/* We poll because DaVinci's won't expose several OTG-critical
	 * status change events (from the transceiver) otherwise.
	 */
	devctl = musb_readb(mregs, MUSB_DEVCTL);
	DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv.state) {
	case OTG_STATE_A_WAIT_VFALL:
		/* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
		 * seems to mis-handle session "start" otherwise (or in our
		 * case "recover"), in routine "VBUS was valid by the time
		 * VBUSERR got reported during enumeration" cases.
		 */
		if (devctl & MUSB_DEVCTL_VBUS) {
			/* VBUS still present: poll again later */
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
			break;
		}
		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
		musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
			MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
		break;
	case OTG_STATE_B_IDLE:
		if (!is_peripheral_enabled(musb))
			break;

		/* There's no ID-changed IRQ, so we have no good way to tell
		 * when to switch to the A-Default state machine (by setting
		 * the DEVCTL.SESSION flag).
		 *
		 * Workaround:  whenever we're in B_IDLE, try setting the
		 * session flag every few seconds.  If it works, ID was
		 * grounded and we're now in the A-Default state machine.
		 *
		 * NOTE setting the session flag is _supposed_ to trigger
		 * SRP, but clearly it doesn't.
		 */
		musb_writeb(mregs, MUSB_DEVCTL,
				devctl | MUSB_DEVCTL_SESSION);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE)
			/* still B-device: keep polling */
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
		else
			musb->xceiv.state = OTG_STATE_A_IDLE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
257
258static irqreturn_t davinci_interrupt(int irq, void *__hci)
259{
260 unsigned long flags;
261 irqreturn_t retval = IRQ_NONE;
262 struct musb *musb = __hci;
263 void __iomem *tibase = musb->ctrl_base;
264 u32 tmp;
265
266 spin_lock_irqsave(&musb->lock, flags);
267
268 /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
269 * the Mentor registers (except for setup), use the TI ones and EOI.
270 *
271 * Docs describe irq "vector" registers asociated with the CPPI and
272 * USB EOI registers. These hold a bitmask corresponding to the
273 * current IRQ, not an irq handler address. Would using those bits
274 * resolve some of the races observed in this dispatch code??
275 */
276
277 /* CPPI interrupts share the same IRQ line, but have their own
278 * mask, state, "vector", and EOI registers.
279 */
280 if (is_cppi_enabled()) {
281 u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
282 u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
283
284 if (cppi_tx || cppi_rx) {
285 DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
286 cppi_completion(musb, cppi_rx, cppi_tx);
287 retval = IRQ_HANDLED;
288 }
289 }
290
291 /* ack and handle non-CPPI interrupts */
292 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
293 musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
294 DBG(4, "IRQ %08x\n", tmp);
295
296 musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
297 >> DAVINCI_USB_RXINT_SHIFT;
298 musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
299 >> DAVINCI_USB_TXINT_SHIFT;
300 musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
301 >> DAVINCI_USB_USBINT_SHIFT;
302
303 /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
304 * DaVinci's missing ID change IRQ. We need an ID change IRQ to
305 * switch appropriately between halves of the OTG state machine.
306 * Managing DEVCTL.SESSION per Mentor docs requires we know its
307 * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
308 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
309 */
310 if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
311 int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
312 void __iomem *mregs = musb->mregs;
313 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
314 int err = musb->int_usb & MUSB_INTR_VBUSERROR;
315
316 err = is_host_enabled(musb)
317 && (musb->int_usb & MUSB_INTR_VBUSERROR);
318 if (err) {
319 /* The Mentor core doesn't debounce VBUS as needed
320 * to cope with device connect current spikes. This
321 * means it's not uncommon for bus-powered devices
322 * to get VBUS errors during enumeration.
323 *
324 * This is a workaround, but newer RTL from Mentor
325 * seems to allow a better one: "re"starting sessions
326 * without waiting (on EVM, a **long** time) for VBUS
327 * to stop registering in devctl.
328 */
329 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
330 musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
331 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
332 WARNING("VBUS error workaround (delay coming)\n");
333 } else if (is_host_enabled(musb) && drvvbus) {
334 musb->is_active = 1;
335 MUSB_HST_MODE(musb);
336 musb->xceiv.default_a = 1;
337 musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
338 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
339 del_timer(&otg_workaround);
340 } else {
341 musb->is_active = 0;
342 MUSB_DEV_MODE(musb);
343 musb->xceiv.default_a = 0;
344 musb->xceiv.state = OTG_STATE_B_IDLE;
345 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
346 }
347
348 /* NOTE: this must complete poweron within 100 msec */
349 davinci_source_power(musb, drvvbus, 0);
350 DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
351 drvvbus ? "on" : "off",
352 otg_state_string(musb),
353 err ? " ERROR" : "",
354 devctl);
355 retval = IRQ_HANDLED;
356 }
357
358 if (musb->int_tx || musb->int_rx || musb->int_usb)
359 retval |= musb_interrupt(musb);
360
361 /* irq stays asserted until EOI is written */
362 musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
363
364 /* poll for ID change */
365 if (is_otg_enabled(musb)
366 && musb->xceiv.state == OTG_STATE_B_IDLE)
367 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
368
369 spin_unlock_irqrestore(&musb->lock, flags);
370
371 /* REVISIT we sometimes get unhandled IRQs
372 * (e.g. ep0). not clear why...
373 */
374 if (retval != IRQ_HANDLED)
375 DBG(5, "unhandled? %08x\n", tmp);
376 return IRQ_HANDLED;
377}
378
/*
 * DaVinci glue-layer init: locate the Mentor core registers, verify
 * the IP block is clocked, arm the OTG polling timer (host mode),
 * switch VBUS off, reset the controller, and power up the on-chip PHY.
 *
 * Returns 0 on success, or -ENODEV when the version register reads
 * zero (i.e. the block appears unclocked).
 */
int __init musb_platform_init(struct musb *musb)
{
	void __iomem	*tibase = musb->ctrl_base;
	u32		revision;

	/* the Mentor core sits at a fixed offset from the TI wrapper */
	musb->mregs += DAVINCI_BASE_OFFSET;
#if 0
	/* REVISIT there's something odd about clocking, this
	 * didn't appear do the job ...
	 */
	musb->clock = clk_get(pDevice, "usb");
	if (IS_ERR(musb->clock))
		return PTR_ERR(musb->clock);

	status = clk_enable(musb->clock);
	if (status < 0)
		return -ENODEV;
#endif

	/* returns zero if e.g. not clocked */
	revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
	if (revision == 0)
		return -ENODEV;

	if (is_host_enabled(musb))
		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

	musb->board_set_vbus = davinci_set_vbus;
	/* start with VBUS off; the OTG machine turns it on as needed */
	davinci_source_power(musb, 0, 1);

	/* reset the controller */
	musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);

	/* start the on-chip PHY and its PLL */
	phy_on();

	/* brief settle time before touching the core again */
	msleep(5);

	/* NOTE: irqs are in mixed mode, not bypass to pure-musb */
	pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
			revision, __raw_readl((void __force __iomem *)
					IO_ADDRESS(USBPHY_CTL_PADDR)),
			musb_readb(tibase, DAVINCI_USB_CTRL_REG));

	musb->isr = davinci_interrupt;
	return 0;
}
426
/*
 * DaVinci glue-layer teardown: stop the OTG polling timer, switch
 * VBUS off, wait (host/A-device only) for VBUS to actually fall,
 * then power down the PHY.  Always returns 0.
 */
int musb_platform_exit(struct musb *musb)
{
	if (is_host_enabled(musb))
		del_timer_sync(&otg_workaround);

	davinci_source_power(musb, 0 /*off*/, 1);

	/* delay, to avoid problems with module reload */
	if (is_host_enabled(musb) && musb->xceiv.default_a) {
		int	maxdelay = 30;	/* up to ~30 seconds of polling */
		u8	devctl, warn = 0;

		/* if there's no peripheral connected, this can take a
		 * long time to fall, especially on EVM with huge C133.
		 */
		do {
			devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
			if (!(devctl & MUSB_DEVCTL_VBUS))
				break;
			/* log only when the reported VBUS level changes */
			if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
				warn = devctl & MUSB_DEVCTL_VBUS;
				DBG(1, "VBUS %d\n",
					warn >> MUSB_DEVCTL_VBUS_SHIFT);
			}
			msleep(1000);
			maxdelay--;
		} while (maxdelay > 0);

		/* in OTG mode, another host might be connected */
		if (devctl & MUSB_DEVCTL_VBUS)
			DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
	}

	phy_off();
	return 0;
}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 000000000000..7fb6238e270f
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * The Inventra Controller Driver for Linux is free software; you
5 * can redistribute it and/or modify it under the terms of the GNU
6 * General Public License version 2 as published by the Free Software
7 * Foundation.
8 */
9
/* NOTE(review): guard spelling "HDRDF" looks like a typo for "HDRC";
 * harmless as long as it stays unique -- confirm before reusing it.
 */
#ifndef __MUSB_HDRDF_H__
#define __MUSB_HDRDF_H__

/*
 * DaVinci-specific definitions
 */

/* Integrated highspeed/otg PHY */
#define USBPHY_CTL_PADDR	(DAVINCI_SYSTEM_MODULE_BASE + 0x34)
#define USBPHY_PHYCLKGD		(1 << 8)
#define USBPHY_SESNDEN		(1 << 7)	/* v(sess_end) comparator */
#define USBPHY_VBDTCTEN		(1 << 6)	/* v(bus) comparator */
#define USBPHY_PHYPLLON		(1 << 4)	/* override pll suspend */
#define USBPHY_CLKO1SEL		(1 << 3)
#define USBPHY_OSCPDWN		(1 << 2)
#define USBPHY_PHYPDWN		(1 << 0)

/* For now include usb OTG module registers here */
/* offsets are relative to musb->ctrl_base (the TI wrapper) */
#define DAVINCI_USB_VERSION_REG		0x00
#define DAVINCI_USB_CTRL_REG		0x04
#define DAVINCI_USB_STAT_REG		0x08
#define DAVINCI_RNDIS_REG		0x10
#define DAVINCI_AUTOREQ_REG		0x14
#define DAVINCI_USB_INT_SOURCE_REG	0x20
#define DAVINCI_USB_INT_SET_REG		0x24
#define DAVINCI_USB_INT_SRC_CLR_REG	0x28
#define DAVINCI_USB_INT_MASK_REG	0x2c
#define DAVINCI_USB_INT_MASK_SET_REG	0x30
#define DAVINCI_USB_INT_MASK_CLR_REG	0x34
#define DAVINCI_USB_INT_SRC_MASKED_REG	0x38
#define DAVINCI_USB_EOI_REG		0x3c
#define DAVINCI_USB_EOI_INTVEC		0x40

/* BEGIN CPPI-generic (?) */

/* CPPI related registers */
#define DAVINCI_TXCPPI_CTRL_REG		0x80
#define DAVINCI_TXCPPI_TEAR_REG		0x84
#define DAVINCI_CPPI_EOI_REG		0x88
#define DAVINCI_CPPI_INTVEC_REG		0x8c
#define DAVINCI_TXCPPI_MASKED_REG	0x90
#define DAVINCI_TXCPPI_RAW_REG		0x94
#define DAVINCI_TXCPPI_INTENAB_REG	0x98
#define DAVINCI_TXCPPI_INTCLR_REG	0x9c

#define DAVINCI_RXCPPI_CTRL_REG		0xC0
#define DAVINCI_RXCPPI_MASKED_REG	0xD0
#define DAVINCI_RXCPPI_RAW_REG		0xD4
#define DAVINCI_RXCPPI_INTENAB_REG	0xD8
#define DAVINCI_RXCPPI_INTCLR_REG	0xDC

#define DAVINCI_RXCPPI_BUFCNT0_REG	0xE0
#define DAVINCI_RXCPPI_BUFCNT1_REG	0xE4
#define DAVINCI_RXCPPI_BUFCNT2_REG	0xE8
#define DAVINCI_RXCPPI_BUFCNT3_REG	0xEC

/* CPPI state RAM entries */
#define DAVINCI_CPPI_STATERAM_BASE_OFFSET	0x100

/* each channel owns 0x40 bytes; RX state sits 0x20 into it */
#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
	(DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
	(DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))

/* CPPI masks */
#define DAVINCI_DMA_CTRL_ENABLE		1
#define DAVINCI_DMA_CTRL_DISABLE	0

#define DAVINCI_DMA_ALL_CHANNELS_ENABLE	0xF
#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF

/* END CPPI-generic (?) */

#define DAVINCI_USB_TX_ENDPTS_MASK	0x1f		/* ep0 + 4 tx */
#define DAVINCI_USB_RX_ENDPTS_MASK	0x1e		/* 4 rx */

/* bit positions of the three irq groups in the wrapper INT registers */
#define DAVINCI_USB_USBINT_SHIFT	16
#define DAVINCI_USB_TXINT_SHIFT		0
#define DAVINCI_USB_RXINT_SHIFT		8

#define DAVINCI_INTR_DRVVBUS		0x0100

#define DAVINCI_USB_USBINT_MASK		0x01ff0000	/* 8 Mentor, DRVVBUS */
#define DAVINCI_USB_TXINT_MASK \
	(DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
#define DAVINCI_USB_RXINT_MASK \
	(DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)

/* Mentor core register offset within the wrapper's address space */
#define DAVINCI_BASE_OFFSET		0x400

#endif	/* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 000000000000..d68ec6daf335
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2261 @@
1/*
2 * MUSB OTG driver core code
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35/*
36 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
37 *
38 * This consists of a Host Controller Driver (HCD) and a peripheral
39 * controller driver implementing the "Gadget" API; OTG support is
40 * in the works. These are normal Linux-USB controller drivers which
41 * use IRQs and have no dedicated thread.
42 *
43 * This version of the driver has only been used with products from
44 * Texas Instruments. Those products integrate the Inventra logic
45 * with other DMA, IRQ, and bus modules, as well as other logic that
46 * needs to be reflected in this driver.
47 *
48 *
49 * NOTE: the original Mentor code here was pretty much a collection
50 * of mechanisms that don't seem to have been fully integrated/working
51 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
52 * Key open issues include:
53 *
54 * - Lack of host-side transaction scheduling, for all transfer types.
55 * The hardware doesn't do it; instead, software must.
56 *
57 * This is not an issue for OTG devices that don't support external
58 * hubs, but for more "normal" USB hosts it's a user issue that the
59 * "multipoint" support doesn't scale in the expected ways. That
60 * includes DaVinci EVM in a common non-OTG mode.
61 *
62 * * Control and bulk use dedicated endpoints, and there's as
63 * yet no mechanism to either (a) reclaim the hardware when
64 * peripherals are NAKing, which gets complicated with bulk
65 * endpoints, or (b) use more than a single bulk endpoint in
66 * each direction.
67 *
68 * RESULT: one device may be perceived as blocking another one.
69 *
70 * * Interrupt and isochronous will dynamically allocate endpoint
71 * hardware, but (a) there's no record keeping for bandwidth;
72 * (b) in the common case that few endpoints are available, there
73 * is no mechanism to reuse endpoints to talk to multiple devices.
74 *
75 * RESULT: At one extreme, bandwidth can be overcommitted in
76 * some hardware configurations, no faults will be reported.
77 * At the other extreme, the bandwidth capabilities which do
78 * exist tend to be severely undercommitted. You can't yet hook
79 * up both a keyboard and a mouse to an external USB hub.
80 */
81
82/*
83 * This gets many kinds of configuration information:
84 * - Kconfig for everything user-configurable
85 * - <asm/arch/hdrc_cnf.h> for SOC or family details
86 * - platform_device for addressing, irq, and platform_data
 * - platform_data is mostly for board-specific information
88 *
89 * Most of the conditional compilation will (someday) vanish.
90 */
91
92#include <linux/module.h>
93#include <linux/kernel.h>
94#include <linux/sched.h>
95#include <linux/slab.h>
96#include <linux/init.h>
97#include <linux/list.h>
98#include <linux/kobject.h>
99#include <linux/platform_device.h>
100#include <linux/io.h>
101
102#ifdef CONFIG_ARM
103#include <asm/arch/hardware.h>
104#include <asm/arch/memory.h>
105#include <asm/mach-types.h>
106#endif
107
108#include "musb_core.h"
109
110
111#ifdef CONFIG_ARCH_DAVINCI
112#include "davinci.h"
113#endif
114
115
116
#if MUSB_DEBUG > 0
/* debug verbosity level, settable at module load time only (perm 0) */
unsigned debug = MUSB_DEBUG;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "initial debug message level");

#define MUSB_VERSION_SUFFIX	"/dbg"
#endif

#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"

#define MUSB_VERSION_BASE	"6.0"

/* debug builds tag the version string with "/dbg" (see above) */
#ifndef MUSB_VERSION_SUFFIX
#define MUSB_VERSION_SUFFIX	""
#endif
#define MUSB_VERSION	MUSB_VERSION_BASE MUSB_VERSION_SUFFIX

#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION

#define MUSB_DRIVER_NAME "musb_hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;

MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
144
145
146/*-------------------------------------------------------------------------*/
147
/*
 * Map a struct device back to its musb instance.  When the host side
 * is built in, usbcore owns drvdata (it stores a struct usb_hcd *),
 * so we must convert via hcd_to_musb(); otherwise drvdata is the
 * musb pointer itself.
 */
static inline struct musb *dev_to_musb(struct device *dev)
{
#ifdef CONFIG_USB_MUSB_HDRC_HCD
	/* usbcore insists dev->driver_data is a "struct hcd *" */
	return hcd_to_musb(dev_get_drvdata(dev));
#else
	return dev_get_drvdata(dev);
#endif
}
157
158/*-------------------------------------------------------------------------*/
159
160#ifndef CONFIG_USB_TUSB6010
161/*
162 * Load an endpoint's FIFO
163 */
164void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
165{
166 void __iomem *fifo = hw_ep->fifo;
167
168 prefetch((u8 *)src);
169
170 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
171 'T', hw_ep->epnum, fifo, len, src);
172
173 /* we can't assume unaligned reads work */
174 if (likely((0x01 & (unsigned long) src) == 0)) {
175 u16 index = 0;
176
177 /* best case is 32bit-aligned source address */
178 if ((0x02 & (unsigned long) src) == 0) {
179 if (len >= 4) {
180 writesl(fifo, src + index, len >> 2);
181 index += len & ~0x03;
182 }
183 if (len & 0x02) {
184 musb_writew(fifo, 0, *(u16 *)&src[index]);
185 index += 2;
186 }
187 } else {
188 if (len >= 2) {
189 writesw(fifo, src + index, len >> 1);
190 index += len & ~0x01;
191 }
192 }
193 if (len & 0x01)
194 musb_writeb(fifo, 0, src[index]);
195 } else {
196 /* byte aligned */
197 writesb(fifo, src, len);
198 }
199}
200
201/*
202 * Unload an endpoint's FIFO
203 */
204void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
205{
206 void __iomem *fifo = hw_ep->fifo;
207
208 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
209 'R', hw_ep->epnum, fifo, len, dst);
210
211 /* we can't assume unaligned writes work */
212 if (likely((0x01 & (unsigned long) dst) == 0)) {
213 u16 index = 0;
214
215 /* best case is 32bit-aligned destination address */
216 if ((0x02 & (unsigned long) dst) == 0) {
217 if (len >= 4) {
218 readsl(fifo, dst, len >> 2);
219 index = len & ~0x03;
220 }
221 if (len & 0x02) {
222 *(u16 *)&dst[index] = musb_readw(fifo, 0);
223 index += 2;
224 }
225 } else {
226 if (len >= 2) {
227 readsw(fifo, dst, len >> 1);
228 index = len & ~0x01;
229 }
230 }
231 if (len & 0x01)
232 dst[index] = musb_readb(fifo, 0);
233 } else {
234 /* byte aligned */
235 readsb(fifo, dst, len);
236 }
237}
238
239#endif /* normal PIO */
240
241
242/*-------------------------------------------------------------------------*/
243
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
/* 53-byte payload; SYNC/DATA0 prefix and CRC16/EOP suffix are
 * generated by the hardware, hence "implicit" below.
 */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};
263
/*
 * Load the USB 2.0 test packet into endpoint 0's FIFO and set
 * TXPKTRDY so the core transmits it.  The caller is responsible for
 * putting the core into test mode first.
 */
void musb_load_testpacket(struct musb *musb)
{
	void __iomem	*regs = musb->endpoints[0].regs;

	musb_ep_select(musb->mregs, 0);
	musb_write_fifo(musb->control_ep,
			sizeof(musb_test_packet), musb_test_packet);
	musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
}
273
274/*-------------------------------------------------------------------------*/
275
276const char *otg_state_string(struct musb *musb)
277{
278 switch (musb->xceiv.state) {
279 case OTG_STATE_A_IDLE: return "a_idle";
280 case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
281 case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
282 case OTG_STATE_A_HOST: return "a_host";
283 case OTG_STATE_A_SUSPEND: return "a_suspend";
284 case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
285 case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
286 case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
287 case OTG_STATE_B_IDLE: return "b_idle";
288 case OTG_STATE_B_SRP_INIT: return "b_srp_init";
289 case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
290 case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
291 case OTG_STATE_B_HOST: return "b_host";
292 default: return "UNDEFINED";
293 }
294}
295
#ifdef CONFIG_USB_MUSB_OTG

/*
 * See also USB_OTG_1-3.pdf 6.6.5 Timers
 * REVISIT: Are the other timers done in the hardware?
 */
#define TB_ASE0_BRST		100	/* Min 3.125 ms */

/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 *
 * Runs in timer (softirq) context with @data holding the musb
 * pointer; takes musb->lock for the state transition and always
 * clears ignore_disconnect on the way out.
 */
void musb_otg_timer_func(unsigned long data)
{
	struct musb	*musb = (struct musb *)data;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv.state) {
	case OTG_STATE_B_WAIT_ACON:
		/* A-device never connected: give up HNP, revert to
		 * being a plain peripheral
		 */
		DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
		musb_g_disconnect(musb);
		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_WAIT_BCON:
		DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
		musb_hnp_stop(musb);
		break;
	default:
		DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
	}
	musb->ignore_disconnect = 0;
	spin_unlock_irqrestore(&musb->lock, flags);
}

static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
332
/*
 * Stops the B-device HNP state. Caller must take care of locking.
 *
 * Depending on the current OTG state this either returns an A-device
 * to host duty (tearing down the gadget side) or drops a B-host back
 * to peripheral mode, re-enabling suspend via MUSB_POWER.
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb_to_hcd(musb);
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	switch (musb->xceiv.state) {
	case OTG_STATE_A_PERIPHERAL:
	case OTG_STATE_A_WAIT_VFALL:
	case OTG_STATE_A_WAIT_BCON:
		DBG(1, "HNP: Switching back to A-host\n");
		musb_g_disconnect(musb);
		musb->xceiv.state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		musb->is_active = 0;
		break;
	case OTG_STATE_B_HOST:
		DBG(1, "HNP: Disabling HR\n");
		hcd->self.is_b_host = 0;
		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		MUSB_DEV_MODE(musb);
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		DBG(1, "HNP: Stopping in unknown state %s\n",
			otg_state_string(musb));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after
	 * connect" errors.
	 */
	musb->port1_status &=
		~(1 << USB_PORT_FEAT_C_CONNECTION);
}
375
376#endif
377
/*
 * Interrupt Service Routine to record USB "global" interrupts.
 * Since these do not happen often and signify things of
 * paramount importance, it seems OK to check them individually;
 * the order of the tests is specified in the manual.
 *
 * @param musb instance pointer
 * @param int_usb snapshot of the USB interrupt register
 * @param devctl snapshot of MUSB_DEVCTL
 * @param power snapshot of MUSB_POWER
 *
 * Returns IRQ_HANDLED when at least one of RESUME/SESSREQ/VBUSERROR/
 * CONNECT/RESET was serviced, IRQ_NONE otherwise.  Runs with
 * musb->lock held by the caller (NOTE(review): inferred from the
 * unlocked state-machine updates here -- confirm against callers).
 */

#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
		| MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
		| MUSB_INTR_RESET)

static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
				u8 devctl, u8 power)
{
	irqreturn_t handled = IRQ_NONE;
	void __iomem *mbase = musb->mregs;

	DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
		int_usb);

	/* in host mode, the peripheral may issue remote wakeup.
	 * in peripheral mode, the host may resume the link.
	 * spurious RESUME irqs happen too, paired with SUSPEND.
	 */
	if (int_usb & MUSB_INTR_RESUME) {
		handled = IRQ_HANDLED;
		DBG(3, "RESUME (%s)\n", otg_state_string(musb));

		if (devctl & MUSB_DEVCTL_HM) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
			switch (musb->xceiv.state) {
			case OTG_STATE_A_SUSPEND:
				/* remote wakeup?  later, GetPortStatus
				 * will stop RESUME signaling
				 */

				if (power & MUSB_POWER_SUSPENDM) {
					/* spurious */
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					DBG(2, "Spurious SUSPENDM\n");
					break;
				}

				/* drive RESUME; root hub code ends it */
				power &= ~MUSB_POWER_SUSPENDM;
				musb_writeb(mbase, MUSB_POWER,
						power | MUSB_POWER_RESUME);

				musb->port1_status |=
						(USB_PORT_STAT_C_SUSPEND << 16)
						| MUSB_PORT_STAT_RESUME;
				musb->rh_timer = jiffies
						+ msecs_to_jiffies(20);

				musb->xceiv.state = OTG_STATE_A_HOST;
				musb->is_active = 1;
				usb_hcd_resume_root_hub(musb_to_hcd(musb));
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
				musb->is_active = 1;
				MUSB_DEV_MODE(musb);
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"host",
					otg_state_string(musb));
			}
#endif
		} else {
			switch (musb->xceiv.state) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
			case OTG_STATE_A_SUSPEND:
				/* possibly DISCONNECT is upcoming */
				musb->xceiv.state = OTG_STATE_A_HOST;
				usb_hcd_resume_root_hub(musb_to_hcd(musb));
				break;
#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
			case OTG_STATE_B_WAIT_ACON:
			case OTG_STATE_B_PERIPHERAL:
				/* disconnect while suspended?  we may
				 * not get a disconnect irq...
				 */
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
						) {
					/* synthesize DISCONNECT for later
					 * stage handling
					 */
					musb->int_usb |= MUSB_INTR_DISCONNECT;
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					break;
				}
				musb_g_resume(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->int_usb &= ~MUSB_INTR_SUSPEND;
				break;
#endif
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"peripheral",
					otg_state_string(musb));
			}
		}
	}

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	/* see manual for the order of the tests */
	if (int_usb & MUSB_INTR_SESSREQ) {
		DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));

		/* IRQ arrives from ID pin sense or (later, if VBUS power
		 * is removed) SRP.  responses are time critical:
		 *  - turn on VBUS (with silicon-specific mechanism)
		 *  - go through A_WAIT_VRISE
		 *  - ... to A_WAIT_BCON.
		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
		 */
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
		musb->ep0_stage = MUSB_EP0_START;
		musb->xceiv.state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		musb_set_vbus(musb, 1);

		handled = IRQ_HANDLED;
	}

	if (int_usb & MUSB_INTR_VBUSERROR) {
		int	ignore = 0;

		/* During connection as an A-Device, we may see a short
		 * current spikes causing voltage drop, because of cable
		 * and peripheral capacitance combined with vbus draw.
		 * (So: less common with truly self-powered devices, where
		 * vbus doesn't act like a power supply.)
		 *
		 * Such spikes are short; usually less than ~500 usec, max
		 * of ~2 msec.  That is, they're not sustained overcurrent
		 * errors, though they're reported using VBUSERROR irqs.
		 *
		 * Workarounds:  (a) hardware: use self powered devices.
		 * (b) software:  ignore non-repeated VBUS errors.
		 *
		 * REVISIT:  do delays from lots of DEBUG_KERNEL checks
		 * make trouble here, keeping VBUS < 4.4V ?
		 */
		switch (musb->xceiv.state) {
		case OTG_STATE_A_HOST:
			/* recovery is dicey once we've gotten past the
			 * initial stages of enumeration, but if VBUS
			 * stayed ok at the other end of the link, and
			 * another reset is due (at least for high speed,
			 * to redo the chirp etc), it might work OK...
			 */
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_WAIT_VRISE:
			if (musb->vbuserr_retry) {
				/* retry: restart the session instead of
				 * reporting overcurrent
				 */
				musb->vbuserr_retry--;
				ignore = 1;
				devctl |= MUSB_DEVCTL_SESSION;
				musb_writeb(mbase, MUSB_DEVCTL, devctl);
			} else {
				musb->port1_status |=
					  (1 << USB_PORT_FEAT_OVER_CURRENT)
					| (1 << USB_PORT_FEAT_C_OVER_CURRENT);
			}
			break;
		default:
			break;
		}

		DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
				otg_state_string(musb),
				devctl,
				({ char *s;
				switch (devctl & MUSB_DEVCTL_VBUS) {
				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<SessEnd"; break;
				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<AValid"; break;
				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<VBusValid"; break;
				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
				default:
					s = "VALID"; break;
				}; s; }),
				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
				musb->port1_status);

		/* go through A_WAIT_VFALL then start a new session */
		if (!ignore)
			musb_set_vbus(musb, 0);
		handled = IRQ_HANDLED;
	}

	if (int_usb & MUSB_INTR_CONNECT) {
		struct usb_hcd *hcd = musb_to_hcd(musb);

		handled = IRQ_HANDLED;
		musb->is_active = 1;
		set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);

		musb->ep0_stage = MUSB_EP0_START;

#ifdef CONFIG_USB_MUSB_OTG
		/* flush endpoints when transitioning from Device Mode */
		if (is_peripheral_active(musb)) {
			/* REVISIT HNP; just force disconnect */
		}
		musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
		musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
		musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
#endif
		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
					|USB_PORT_STAT_HIGH_SPEED
					|USB_PORT_STAT_ENABLE
					);
		musb->port1_status |= USB_PORT_STAT_CONNECTION
					|(USB_PORT_STAT_C_CONNECTION << 16);

		/* high vs full speed is just a guess until after reset */
		if (devctl & MUSB_DEVCTL_LSDEV)
			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;

		if (hcd->status_urb)
			usb_hcd_poll_rh_status(hcd);
		else
			usb_hcd_resume_root_hub(hcd);

		MUSB_HST_MODE(musb);

		/* indicate new connection to OTG machine */
		switch (musb->xceiv.state) {
		case OTG_STATE_B_PERIPHERAL:
			if (int_usb & MUSB_INTR_SUSPEND) {
				DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
				musb->xceiv.state = OTG_STATE_B_HOST;
				hcd->self.is_b_host = 1;
				int_usb &= ~MUSB_INTR_SUSPEND;
			} else
				DBG(1, "CONNECT as b_peripheral???\n");
			break;
		case OTG_STATE_B_WAIT_ACON:
			DBG(1, "HNP: Waiting to switch to b_host state\n");
			musb->xceiv.state = OTG_STATE_B_HOST;
			hcd->self.is_b_host = 1;
			break;
		default:
			if ((devctl & MUSB_DEVCTL_VBUS)
					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
				musb->xceiv.state = OTG_STATE_A_HOST;
				hcd->self.is_b_host = 0;
			}
			break;
		}
		DBG(1, "CONNECT (%s) devctl %02x\n",
				otg_state_string(musb), devctl);
	}
#endif	/* CONFIG_USB_MUSB_HDRC_HCD */

	/* mentor saves a bit: bus reset and babble share the same irq.
	 * only host sees babble; only peripheral sees bus reset.
	 */
	if (int_usb & MUSB_INTR_RESET) {
		if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
			/*
			 * Looks like non-HS BABBLE can be ignored, but
			 * HS BABBLE is an error condition.  For HS the solution
			 * is to avoid babble in the first place and fix what
			 * caused BABBLE.  When HS BABBLE happens we can only
			 * stop the session.
			 */
			if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
				DBG(1, "BABBLE devctl: %02x\n", devctl);
			else {
				ERR("Stopping host session -- babble\n");
				musb_writeb(mbase, MUSB_DEVCTL, 0);
			}
		} else if (is_peripheral_capable()) {
			DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
			switch (musb->xceiv.state) {
#ifdef CONFIG_USB_OTG
			case OTG_STATE_A_SUSPEND:
				/* We need to ignore disconnect on suspend
				 * otherwise tusb 2.0 won't reconnect after a
				 * power cycle, which breaks otg compliance.
				 */
				musb->ignore_disconnect = 1;
				musb_g_reset(musb);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
				DBG(1, "HNP: Setting timer as %s\n",
						otg_state_string(musb));
				musb_otg_timer.data = (unsigned long)musb;
				mod_timer(&musb_otg_timer, jiffies
					+ msecs_to_jiffies(100));
				break;
			case OTG_STATE_A_PERIPHERAL:
				musb_hnp_stop(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				DBG(1, "HNP: RESET (%s), to b_peripheral\n",
					otg_state_string(musb));
				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
				musb_g_reset(musb);
				break;
#endif
			case OTG_STATE_B_IDLE:
				musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
				/* FALLTHROUGH */
			case OTG_STATE_B_PERIPHERAL:
				musb_g_reset(musb);
				break;
			default:
				DBG(1, "Unhandled BUS RESET as %s\n",
					otg_state_string(musb));
			}
		}

		handled = IRQ_HANDLED;
	}
	/* let the sysfs/OTG workqueue see the state change */
	schedule_work(&musb->irq_work);

	return handled;
}
706
707/*
708 * Interrupt Service Routine to record USB "global" interrupts.
709 * Since these do not happen often and signify things of
710 * paramount importance, it seems OK to check them individually;
711 * the order of the tests is specified in the manual
712 *
713 * @param musb instance pointer
714 * @param int_usb register contents
715 * @param devctl
716 * @param power
717 */
static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
				u8 devctl, u8 power)
{
	irqreturn_t handled = IRQ_NONE;

#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
 * supporting transfer phasing to prevent exceeding ISO bandwidth
 * limits of a given frame or microframe.
 *
 * It's not needed for peripheral side, which dedicates endpoints;
 * though it _might_ use SOF irqs for other purposes.
 *
 * And it's not currently needed for host side, which also dedicates
 * endpoints, relies on TX/RX interval registers, and isn't claimed
 * to support ISO transfers yet.
 */
	if (int_usb & MUSB_INTR_SOF) {
		void __iomem *mbase = musb->mregs;
		struct musb_hw_ep *ep;
		u8 epnum;
		u16 frame;

		DBG(6, "START_OF_FRAME\n");
		handled = IRQ_HANDLED;

		/* start any periodic Tx transfers waiting for current frame */
		frame = musb_readw(mbase, MUSB_FRAME);
		ep = musb->endpoints;
		for (epnum = 1; (epnum < musb->nr_endpoints)
				&& (musb->epmask >= (1 << epnum));
				epnum++, ep++) {
			/*
			 * FIXME handle framecounter wraps (12 bits)
			 * eliminate duplicated StartUrb logic
			 */
			if (ep->dwWaitFrame >= frame) {
				ep->dwWaitFrame = 0;
				pr_debug("SOF --> periodic TX%s on %d\n",
					ep->tx_channel ? " DMA" : "",
					epnum);
				if (!ep->tx_channel)
					musb_h_tx_start(musb, epnum);
				else
					cppi_hostdma_start(musb, epnum);
			}
		}		/* end of for loop */
	}
#endif

	/* Disconnect: fan out by current OTG state.
	 * NOTE(review): ignore_disconnect is presumably set by glue/board
	 * code to mask transient drops -- confirm against the setters.
	 */
	if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
		DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
				otg_state_string(musb),
				MUSB_MODE(musb), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv.state) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
			musb_root_disconnect(musb);
			/* a_wait_bcon != 0 means "time out the session" */
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
#endif	/* HOST */
#ifdef CONFIG_USB_MUSB_OTG
		case OTG_STATE_B_HOST:
			musb_hnp_stop(musb);
			break;
		case OTG_STATE_A_PERIPHERAL:
			/* HNP role reversal in progress: stop it, then tear
			 * down the (virtual) root hub connection too */
			musb_hnp_stop(musb);
			musb_root_disconnect(musb);
			/* FALLTHROUGH */
		case OTG_STATE_B_WAIT_ACON:
			/* FALLTHROUGH */
#endif	/* OTG */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_IDLE:
			musb_g_disconnect(musb);
			break;
#endif	/* GADGET */
		default:
			WARNING("unhandled DISCONNECT transition (%s)\n",
				otg_state_string(musb));
			break;
		}

		/* notify sysfs watchers of the mode change (musb_irq_work) */
		schedule_work(&musb->irq_work);
	}

	if (int_usb & MUSB_INTR_SUSPEND) {
		DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
				otg_state_string(musb), devctl, power);
		handled = IRQ_HANDLED;

		switch (musb->xceiv.state) {
#ifdef CONFIG_USB_MUSB_OTG
		case OTG_STATE_A_PERIPHERAL:
			/*
			 * We cannot stop HNP here, devctl BDEVICE might be
			 * still set.
			 */
			break;
#endif
		case OTG_STATE_B_PERIPHERAL:
			musb_g_suspend(musb);
			/* stay "active" only while the host may still start
			 * HNP (b_hnp_enable was set by the host) */
			musb->is_active = is_otg_enabled(musb)
					&& musb->xceiv.gadget->b_hnp_enable;
			if (musb->is_active) {
#ifdef	CONFIG_USB_MUSB_OTG
				/* arm the b_ase0_brst timeout: if the host
				 * doesn't take over the bus in time, HNP fails */
				musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
				DBG(1, "HNP: Setting timer for b_ase0_brst\n");
				musb_otg_timer.data = (unsigned long)musb;
				mod_timer(&musb_otg_timer, jiffies
					+ msecs_to_jiffies(TB_ASE0_BRST));
#endif
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_A_HOST:
			musb->xceiv.state = OTG_STATE_A_SUSPEND;
			musb->is_active = is_otg_enabled(musb)
					&& musb->xceiv.host->b_hnp_enable;
			break;
		case OTG_STATE_B_HOST:
			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
			DBG(1, "REVISIT: SUSPEND as B_HOST\n");
			break;
		default:
			/* "should not happen" */
			musb->is_active = 0;
			break;
		}
		schedule_work(&musb->irq_work);
	}


	return handled;
}
863
864/*-------------------------------------------------------------------------*/
865
866/*
867* Program the HDRC to start (enable interrupts, dma, etc.).
868*/
void musb_start(struct musb *musb)
{
	void __iomem *regs = musb->mregs;
	u8 devctl = musb_readb(regs, MUSB_DEVCTL);

	DBG(2, "<== devctl %02x\n", devctl);

	/* Set INT enable registers, enable interrupts */
	musb_writew(regs, MUSB_INTRTXE, musb->epmask);
	/* bit 0 cleared: ep0 events arrive via INTRTX bit 0 (handled in
	 * musb_interrupt), so there is no separate ep0 RX interrupt */
	musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
	/* 0xf7: all "global" USB irqs except bit 3 -- presumably SOF,
	 * which this driver doesn't use (see #if 0 block in
	 * musb_stage2_irq); TODO(review) confirm against MUSB_INTR_* */
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);

	musb_writeb(regs, MUSB_TESTMODE, 0);

	/* put into basic highspeed mode and start session */
	musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
						| MUSB_POWER_SOFTCONN
						| MUSB_POWER_HSENAB
						/* ENSUSPEND wedges tusb */
						/* | MUSB_POWER_ENSUSPEND */
						);

	musb->is_active = 0;
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	if (is_otg_enabled(musb)) {
		/* session started after:
		 * (a) ID-grounded irq, host mode;
		 * (b) vbus present/connect IRQ, peripheral mode;
		 * (c) peripheral initiates, using SRP
		 */
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->is_active = 1;
		else
			devctl |= MUSB_DEVCTL_SESSION;

	} else if (is_host_enabled(musb)) {
		/* assume ID pin is hard-wired to ground */
		devctl |= MUSB_DEVCTL_SESSION;

	} else /* peripheral is enabled */ {
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->is_active = 1;
	}
	/* platform glue first, then the DEVCTL write that may start a session */
	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}
917
918
/*
 * Quiesce the core: mask all interrupt sources, end any session,
 * and clear interrupts that were already latched.
 */
static void musb_generic_disable(struct musb *musb)
{
	void __iomem *mbase = musb->mregs;
	u16 temp;

	/* disable interrupts */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/* off */
	musb_writeb(mbase, MUSB_DEVCTL, 0);

	/* flush pending interrupts: these reads are for their side effect
	 * of clearing latched status; the values are deliberately unused */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);

}
938
939/*
940 * Make the HDRC stop (disable interrupts, etc.);
941 * reversible by musb_start
942 * called on gadget driver unregister
943 * with controller locked, irqs blocked
944 * acts as a NOP unless some role activated the hardware
945 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ...: platform-level sources first, then
	 * the core itself (reverse of musb_start's enable order) */
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	DBG(3, "HDRC disabled\n");

	/* FIXME
	 *  - mark host and/or peripheral drivers unusable/inactive
	 *  - disable DMA (and enable it in HdrcStart)
	 *  - make sure we can musb_start() after musb_stop(); with
	 *    OTG mode, gadget driver module rmmod/modprobe cycles that
	 *  - ...
	 */
	musb_platform_try_idle(musb, 0);
}
962
/* platform_device .shutdown hook: quiesce hardware for reboot/poweroff */
static void musb_shutdown(struct platform_device *pdev)
{
	struct musb *musb = dev_to_musb(&pdev->dev);
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);
	musb_platform_disable(musb);
	musb_generic_disable(musb);
	if (musb->clock) {
		/* NOTE(review): unlike musb_free(), there is no
		 * clk_disable() before clk_put() here -- confirm intended.
		 * Also clk_put() runs under a spinlock with irqs off;
		 * verify it cannot sleep on the supported platforms. */
		clk_put(musb->clock);
		musb->clock = NULL;
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	/* FIXME power down */
}
979
980
981/*-------------------------------------------------------------------------*/
982
983/*
984 * The silicon either has hard-wired endpoint configurations, or else
985 * "dynamic fifo" sizing. The driver has support for both, though at this
986 * writing only the dynamic sizing is very well tested. We use normal
987 * idioms to so both modes are compile-tested, but dead code elimination
988 * leaves only the relevant one in the object file.
989 *
990 * We don't currently use dynamic fifo setup capability to do anything
991 * more than selecting one of a bunch of predefined configurations.
992 */
/* Default FIFO table selection: the larger mode 4 layout on parts with
 * 16KB FIFO RAM (TUSB6010, OMAP2/3), mode 2 (4KB) elsewhere.
 * NOTE(review): __initdata on a module_param is safe only because the
 * parameter is parsed before init sections are freed -- confirm. */
#if defined(CONFIG_USB_TUSB6010) || \
	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
static ushort __initdata fifo_mode = 4;
#else
static ushort __initdata fifo_mode = 2;
#endif

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1003
1004
/* FIFO personality (shared rx+tx, or one direction) and buffering mode;
 * packed so the config tables below stay small */
enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));

/* one table entry == one FIFO to program via fifo_setup() */
struct fifo_cfg {
	u8 hw_ep_num;	/* hardware endpoint number this entry programs */
	enum fifo_style style;	/* shared or unidirectional half */
	enum buf_mode mode;	/* single (default) or double buffered */
	u16 maxpacket;	/* FIFO size in bytes; must be a power of two */
};
1014
1015/*
1016 * tables defining fifo_mode values. define more if you like.
1017 * for host side, make sure both halves of ep1 are set up.
1018 */
1019
/* mode 0 - fits in 2KB: ep1 split tx/rx halves, eps 2-4 shared */
static struct fifo_cfg __initdata mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
1028
/* mode 1 - fits in 4KB: like mode 0 but eps 1-2 double-buffered */
static struct fifo_cfg __initdata mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
1037
/* mode 2 - fits in 4KB: eps 1-2 split tx/rx, eps 3-4 shared, no double
 * buffering (default on parts with 4KB FIFO RAM) */
static struct fifo_cfg __initdata mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
1047
/* mode 3 - fits in 4KB: like mode 2 but ep1 halves double-buffered */
static struct fifo_cfg __initdata mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};
1057
/* mode 4 - fits in 16KB: eps 1-13 split tx/rx at 512 bytes each,
 * eps 14-15 shared 1024-byte FIFOs (default on 16KB-RAM parts) */
static struct fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
1089
1090
1091/*
1092 * configure a fifo; for non-shared endpoints, this may be called
1093 * once for a tx fifo and once for an rx fifo.
1094 *
1095 * returns negative errno or offset for next fifo.
1096 */
static int __init
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct fifo_cfg *cfg, u16 offset)
{
	void __iomem *mbase = musb->mregs;
	int size = 0;
	u16 maxpacket = cfg->maxpacket;
	u16 c_off = offset >> 3;	/* FIFOADD registers count in 8-byte units */
	u8 c_size;

	/* expect hw_ep has already been zero-initialized */

	/* floor(log2(maxpacket)), minimum 8 bytes; ffs() picks the LOWEST
	 * set bit, so this assumes maxpacket is a power of two (every
	 * table entry above is) */
	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	/* FIFOSZ registers encode log2(bytes) - 3, plus the DPB flag;
	 * (1 << (ram_bits + 2)) is the total FIFO RAM in bytes */
	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirection halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check: be sure ep0 can both rx and tx ... */
#endif
	switch (cfg->style) {
	case FIFO_TX:
		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		/* shared fifo: both directions get the same size/offset.
		 * NOTE(review): the rx_* field updates sit next to the TX
		 * register writes (and vice versa) -- looks swapped, but is
		 * harmless since c_size/c_off are identical for both. */
		musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_TXFIFOADD, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
		musb_writew(mbase, MUSB_RXFIFOADD, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	/* next free RAM offset; double buffering consumes twice the space */
	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
1169
/* ep0: shared 64-byte control FIFO, always placed at RAM offset 0 */
static struct fifo_cfg __initdata ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
1173
1174static int __init ep_config_from_table(struct musb *musb)
1175{
1176 const struct fifo_cfg *cfg;
1177 unsigned i, n;
1178 int offset;
1179 struct musb_hw_ep *hw_ep = musb->endpoints;
1180
1181 switch (fifo_mode) {
1182 default:
1183 fifo_mode = 0;
1184 /* FALLTHROUGH */
1185 case 0:
1186 cfg = mode_0_cfg;
1187 n = ARRAY_SIZE(mode_0_cfg);
1188 break;
1189 case 1:
1190 cfg = mode_1_cfg;
1191 n = ARRAY_SIZE(mode_1_cfg);
1192 break;
1193 case 2:
1194 cfg = mode_2_cfg;
1195 n = ARRAY_SIZE(mode_2_cfg);
1196 break;
1197 case 3:
1198 cfg = mode_3_cfg;
1199 n = ARRAY_SIZE(mode_3_cfg);
1200 break;
1201 case 4:
1202 cfg = mode_4_cfg;
1203 n = ARRAY_SIZE(mode_4_cfg);
1204 break;
1205 }
1206
1207 printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
1208 musb_driver_name, fifo_mode);
1209
1210
1211 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1212 /* assert(offset > 0) */
1213
1214 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1215 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1216 */
1217
1218 for (i = 0; i < n; i++) {
1219 u8 epn = cfg->hw_ep_num;
1220
1221 if (epn >= musb->config->num_eps) {
1222 pr_debug("%s: invalid ep %d\n",
1223 musb_driver_name, epn);
1224 continue;
1225 }
1226 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1227 if (offset < 0) {
1228 pr_debug("%s: mem overrun, ep %d\n",
1229 musb_driver_name, epn);
1230 return -EINVAL;
1231 }
1232 epn++;
1233 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1234 }
1235
1236 printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
1237 musb_driver_name,
1238 n + 1, musb->config->num_eps * 2 - 1,
1239 offset, (1 << (musb->config->ram_bits + 2)));
1240
1241#ifdef CONFIG_USB_MUSB_HDRC_HCD
1242 if (!musb->bulk_ep) {
1243 pr_debug("%s: missing bulk\n", musb_driver_name);
1244 return -EINVAL;
1245 }
1246#endif
1247
1248 return 0;
1249}
1250
1251
1252/*
1253 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1254 * @param musb the controller
1255 */
static int __init ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0, reg;
	struct musb_hw_ep *hw_ep;
	/* NOTE(review): missing __iomem annotation (sparse would warn);
	 * elsewhere in this file mregs is void __iomem * */
	void *mbase = musb->mregs;

	DBG(2, "<== static silicon ep config\n");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		/* read from core using indexed model */
		reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
		if (!reg) {
			/* 0's returned when no more endpoints */
			break;
		}
		musb->nr_endpoints++;
		musb->epmask |= (1 << epnum);

		/* low nibble encodes log2 of the TX FIFO size in bytes */
		hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);

		/* shared TX/RX FIFO? (high nibble all-ones flags shared) */
		if ((reg & 0xf0) == 0xf0) {
			hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
			hw_ep->is_shared_fifo = true;
			continue;
		} else {
			/* high nibble encodes log2 of the RX FIFO size */
			hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
			hw_ep->is_shared_fifo = false;
		}

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

#ifdef CONFIG_USB_MUSB_HDRC_HCD
		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
#endif
	}

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}
#endif

	return 0;
}
1317
/* silicon flavor: MHDRC is the multipoint variant of the HDRC core */
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1319
1320/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1321 * configure endpoints, or take their config from silicon
1322 */
static int __init musb_core_init(u16 musb_type, struct musb *musb)
{
#ifdef MUSB_AHB_ID
	u32 data;
#endif
	u8 reg;
	char *type;
	u16 hwvers, rev_major, rev_minor;
	/* aInfo accumulates a feature summary via strcat; assumes the
	 * worst-case combined string fits in 78 bytes -- TODO(review)
	 * confirm against the maximum feature set below */
	char aInfo[78], aRevision[32], aDate[12];
	void __iomem *mbase = musb->mregs;
	int status = 0;
	int i;

	/* log core options (read using indexed model) */
	musb_ep_select(mbase, 0);
	reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO)
		strcat(aInfo, ", dyn FIFOs");
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
#ifdef C_MP_RX
		musb->bulk_combine = true;
#else
		strcat(aInfo, " (X)");		/* no driver support */
#endif
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
#ifdef C_MP_TX
		musb->bulk_split = true;
#else
		strcat(aInfo, " (X)");		/* no driver support */
#endif
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		strcat(aInfo, " (X)");		/* no driver support */
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		strcat(aInfo, " (X)");		/* no driver support */
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
			musb_driver_name, reg, aInfo);

#ifdef MUSB_AHB_ID
	/* on AHB-ID parts, probe the ID registers and override the
	 * caller-supplied musb_type */
	data = musb_readl(mbase, 0x404);
	sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
		(data >> 16) & 0xff, (data >> 24) & 0xff);
	/* FIXME ID2 and ID3 are unused */
	data = musb_readl(mbase, 0x408);
	printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
	data = musb_readl(mbase, 0x40c);
	printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
	reg = musb_readb(mbase, 0x400);
	musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
#else
	aDate[0] = 0;
#endif
	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifdef CONFIG_USB_MUSB_HDRC_HCD
#ifndef	CONFIG_USB_OTG_BLACKLIST_HUB
		printk(KERN_ERR
			"%s: kernel must blacklist external hubs\n",
			musb_driver_name);
#endif
#endif
	}

	/* log release info */
	hwvers = musb_readw(mbase, MUSB_HWVERS);
	rev_major = (hwvers >> 10) & 0x1f;
	rev_minor = hwvers & 0x3ff;
	snprintf(aRevision, 32, "%d.%d%s", rev_major,
		rev_minor, (hwvers & 0x8000) ? "RC" : "");
	printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
			musb_driver_name, type, aRevision, aDate);

	/* configure ep0 */
	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;

	/* discover endpoint configuration */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	/* the silicon's FIFO model (reg still holds CONFIGDATA) must
	 * match what the driver was configured for */
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		if (musb->config->dyn_fifo)
			status = ep_config_from_table(musb);
		else {
			ERR("reconfigure software for Dynamic FIFOs\n");
			status = -ENODEV;
		}
	} else {
		if (!musb->config->dyn_fifo)
			status = ep_config_from_hw(musb);
		else {
			ERR("reconfigure software for static FIFOs\n");
			return -ENODEV;
		}
	}

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep *hw_ep = musb->endpoints + i;

		hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
#ifdef CONFIG_USB_TUSB6010
		/* TUSB6010 exposes the FIFOs through both async and sync
		 * external bus windows */
		hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
		hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
		hw_ep->fifo_sync_va =
			musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);

		if (i == 0)
			hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
		else
			hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
#endif

		hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
#ifdef CONFIG_USB_MUSB_HDRC_HCD
		hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;
#endif

		if (hw_ep->max_packet_sz_tx) {
			printk(KERN_DEBUG
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			printk(KERN_DEBUG
				"%s: hw_ep %d%s, %smax %d\n",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			DBG(1, "hw_ep %d not configured\n", i);
	}

	return 0;
}
1486
1487/*-------------------------------------------------------------------------*/
1488
1489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
1490
/* Top-level IRQ handler for OMAP builds: latch the three interrupt
 * status registers into the musb struct, then run the shared handler. */
static irqreturn_t generic_interrupt(int irq, void *__hci)
{
	unsigned long flags;
	irqreturn_t retval = IRQ_NONE;
	struct musb *musb = __hci;

	spin_lock_irqsave(&musb->lock, flags);

	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);

	if (musb->int_usb || musb->int_tx || musb->int_rx)
		retval = musb_interrupt(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	/* REVISIT we sometimes get spurious IRQs on g_ep0
	 * not clear why...
	 */
	if (retval != IRQ_HANDLED)
		DBG(5, "spurious?\n");

	/* NOTE(review): always claims the IRQ -- retval only feeds the
	 * debug trace above. Presumably deliberate given the spurious-IRQ
	 * note; confirm before "fixing" to return retval. */
	return IRQ_HANDLED;
}
1516
1517#else
1518#define generic_interrupt NULL
1519#endif
1520
1521/*
1522 * handle all the irqs defined by the HDRC core. for now we expect: other
1523 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1524 * will be assigned, and the irq will already have been acked.
1525 *
1526 * called in irq context with spinlock held, irqs blocked
1527 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t retval = IRQ_NONE;
	u8 devctl, power;
	int ep_num;
	u32 reg;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	power = musb_readb(musb->mregs, MUSB_POWER);

	DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
		(devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
		musb->int_usb, musb->int_tx, musb->int_rx);

	/* the core can interrupt us for multiple reasons; docs have
	 * a generic interrupt flowchart to follow
	 */
	if (musb->int_usb & STAGE0_MASK)
		retval |= musb_stage0_irq(musb, musb->int_usb,
				devctl, power);

	/* "stage 1" is handling endpoint irqs */

	/* handle endpoint 0 first: ep0 is signalled via INTRTX bit 0 */
	if (musb->int_tx & 1) {
		if (devctl & MUSB_DEVCTL_HM)
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);
	}

	/* RX on endpoints 1-15; dispatch by role (DEVCTL_HM == host) */
	reg = musb->int_rx >> 1;
	ep_num = 1;
	while (reg) {
		if (reg & 1) {
			/* musb_ep_select(musb->mregs, ep_num); */
			/* REVISIT just retval = ep->rx_irq(...) */
			/* plain assignment (not |=) is fine: IRQ_HANDLED
			 * is the only value ever stored here */
			retval = IRQ_HANDLED;
			if (devctl & MUSB_DEVCTL_HM) {
				if (is_host_capable())
					musb_host_rx(musb, ep_num);
			} else {
				if (is_peripheral_capable())
					musb_g_rx(musb, ep_num);
			}
		}

		reg >>= 1;
		ep_num++;
	}

	/* TX on endpoints 1-15 */
	reg = musb->int_tx >> 1;
	ep_num = 1;
	while (reg) {
		if (reg & 1) {
			/* musb_ep_select(musb->mregs, ep_num); */
			/* REVISIT just retval |= ep->tx_irq(...) */
			retval = IRQ_HANDLED;
			if (devctl & MUSB_DEVCTL_HM) {
				if (is_host_capable())
					musb_host_tx(musb, ep_num);
			} else {
				if (is_peripheral_capable())
					musb_g_tx(musb, ep_num);
			}
		}
		reg >>= 1;
		ep_num++;
	}

	/* finish handling "global" interrupts after handling fifos */
	if (musb->int_usb)
		retval |= musb_stage2_irq(musb,
				musb->int_usb, devctl, power);

	return retval;
}
1607
1608
1609#ifndef CONFIG_MUSB_PIO_ONLY
/* NOTE(review): storage is int but module_param declares bool (accepted
 * by kernels of this vintage); __initdata is safe only because the
 * parameter is parsed before init memory is freed -- confirm both. */
static int __initdata use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1615
/*
 * musb_dma_completion - DMA-done callback from the DMA controller code;
 * re-runs the same per-endpoint service routines the IRQ path uses,
 * dispatched by endpoint number, direction, and current role.
 */
void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	/* called with controller lock already held */

	if (!epnum) {
#ifndef CONFIG_USB_TUSB_OMAP_DMA
		if (!is_cppi_enabled()) {
			/* endpoint 0 */
			if (devctl & MUSB_DEVCTL_HM)
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
#endif
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (devctl & MUSB_DEVCTL_HM) {
				if (is_host_capable())
					musb_host_tx(musb, epnum);
			} else {
				if (is_peripheral_capable())
					musb_g_tx(musb, epnum);
			}
		} else {
			/* receive */
			if (devctl & MUSB_DEVCTL_HM) {
				if (is_host_capable())
					musb_host_rx(musb, epnum);
			} else {
				if (is_peripheral_capable())
					musb_g_rx(musb, epnum);
			}
		}
	}
}
1654
1655#else
1656#define use_dma 0
1657#endif
1658
1659/*-------------------------------------------------------------------------*/
1660
1661#ifdef CONFIG_SYSFS
1662
1663static ssize_t
1664musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1665{
1666 struct musb *musb = dev_to_musb(dev);
1667 unsigned long flags;
1668 int ret = -EINVAL;
1669
1670 spin_lock_irqsave(&musb->lock, flags);
1671 ret = sprintf(buf, "%s\n", otg_state_string(musb));
1672 spin_unlock_irqrestore(&musb->lock, flags);
1673
1674 return ret;
1675}
1676
1677static ssize_t
1678musb_mode_store(struct device *dev, struct device_attribute *attr,
1679 const char *buf, size_t n)
1680{
1681 struct musb *musb = dev_to_musb(dev);
1682 unsigned long flags;
1683
1684 spin_lock_irqsave(&musb->lock, flags);
1685 if (!strncmp(buf, "host", 4))
1686 musb_platform_set_mode(musb, MUSB_HOST);
1687 if (!strncmp(buf, "peripheral", 10))
1688 musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1689 if (!strncmp(buf, "otg", 3))
1690 musb_platform_set_mode(musb, MUSB_OTG);
1691 spin_unlock_irqrestore(&musb->lock, flags);
1692
1693 return n;
1694}
1695static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
1696
1697static ssize_t
1698musb_vbus_store(struct device *dev, struct device_attribute *attr,
1699 const char *buf, size_t n)
1700{
1701 struct musb *musb = dev_to_musb(dev);
1702 unsigned long flags;
1703 unsigned long val;
1704
1705 if (sscanf(buf, "%lu", &val) < 1) {
1706 printk(KERN_ERR "Invalid VBUS timeout ms value\n");
1707 return -EINVAL;
1708 }
1709
1710 spin_lock_irqsave(&musb->lock, flags);
1711 musb->a_wait_bcon = val;
1712 if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
1713 musb->is_active = 0;
1714 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1715 spin_unlock_irqrestore(&musb->lock, flags);
1716
1717 return n;
1718}
1719
1720static ssize_t
1721musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1722{
1723 struct musb *musb = dev_to_musb(dev);
1724 unsigned long flags;
1725 unsigned long val;
1726 int vbus;
1727
1728 spin_lock_irqsave(&musb->lock, flags);
1729 val = musb->a_wait_bcon;
1730 vbus = musb_platform_get_vbus_status(musb);
1731 spin_unlock_irqrestore(&musb->lock, flags);
1732
1733 return sprintf(buf, "Vbus %s, timeout %lu\n",
1734 vbus ? "on" : "off", val);
1735}
1736static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
1737
1738#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1739
1740/* Gadget drivers can't know that a host is connected so they might want
1741 * to start SRP, but users can. This allows userspace to trigger SRP.
1742 */
1743static ssize_t
1744musb_srp_store(struct device *dev, struct device_attribute *attr,
1745 const char *buf, size_t n)
1746{
1747 struct musb *musb = dev_to_musb(dev);
1748 unsigned short srp;
1749
1750 if (sscanf(buf, "%hu", &srp) != 1
1751 || (srp != 1)) {
1752 printk(KERN_ERR "SRP: Value must be 1\n");
1753 return -EINVAL;
1754 }
1755
1756 if (srp == 1)
1757 musb_g_wakeup(musb);
1758
1759 return n;
1760}
1761static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1762
1763#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
1764
1765#endif /* sysfs */
1766
1767/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
	struct musb *musb = container_of(data, struct musb, irq_work);
	/* NOTE(review): function-scope static is shared by ALL controller
	 * instances; with more than one MUSB the state tracking would
	 * interleave -- confirm the single-instance assumption */
	static int old_state;

	/* poke sysfs pollers of the "mode" attribute on state changes */
	if (musb->xceiv.state != old_state) {
		old_state = musb->xceiv.state;
		sysfs_notify(&musb->controller->kobj, NULL, "mode");
	}
}
1778
1779/* --------------------------------------------------------------------------
1780 * Init support
1781 */
1782
/*
 * Allocate and minimally initialize a struct musb. For host-capable
 * builds the musb lives inside the usb_hcd's private area (so usbcore
 * owns the allocation and drvdata); gadget-only builds kzalloc it and
 * set drvdata themselves. Returns NULL on allocation failure.
 */
static struct musb *__init
allocate_instance(struct device *dev,
		struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb *musb;
	struct musb_hw_ep *ep;
	int epnum;
#ifdef CONFIG_USB_MUSB_HDRC_HCD
	struct usb_hcd *hcd;

	hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
	if (!hcd)
		return NULL;
	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */

	musb = hcd_to_musb(hcd);
	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);

	hcd->uses_new_polling = 1;

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
#else
	musb = kzalloc(sizeof *musb, GFP_KERNEL);
	if (!musb)
		return NULL;
	dev_set_drvdata(dev, musb);

#endif

	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;	/* "no IRQ claimed yet" sentinel */
	musb->config = config;
	/* give every hw endpoint its back-pointer and number */
	for (epnum = 0, ep = musb->endpoints;
			epnum < musb->config->num_eps;
			epnum++, ep++) {

		ep->musb = musb;
		ep->epnum = epnum;
	}

	musb->controller = dev;
	return musb;
}
1829
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	device_remove_file(musb->controller, &dev_attr_mode);
	device_remove_file(musb->controller, &dev_attr_vbus);
#ifdef CONFIG_USB_MUSB_OTG
	/* NOTE(review): removal is guarded by CONFIG_USB_MUSB_OTG but the
	 * srp attribute is defined under CONFIG_USB_GADGET_MUSB_HDRC --
	 * confirm OTG always implies the gadget config */
	device_remove_file(musb->controller, &dev_attr_srp);
#endif
#endif

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	musb_gadget_cleanup(musb);
#endif

	/* nIrq >= 0 means the IRQ was successfully claimed (see
	 * allocate_instance's -ENODEV sentinel) */
	if (musb->nIrq >= 0) {
		disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller *c = musb->dma_controller;

		(void) c->stop(c);
		dma_controller_destroy(c);
	}

	/* end any session both before and after platform teardown */
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_platform_exit(musb);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	if (musb->clock) {
		clk_disable(musb->clock);
		clk_put(musb->clock);
	}

#ifdef CONFIG_USB_MUSB_OTG
	put_device(musb->xceiv.dev);
#endif

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	/* musb is embedded in the hcd; this drops the last reference */
	usb_put_hcd(musb_to_hcd(musb));
#else
	kfree(musb);
#endif
}
1879
1880/*
1881 * Perform generic per-controller initialization.
1882 *
1883 * @pDevice: the controller (already clocked, etc)
1884 * @nIrq: irq
1885 * @mregs: virtual address of controller registers,
1886 * not yet corrected for platform-specific offsets
1887 */
1888static int __init
1889musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1890{
1891 int status;
1892 struct musb *musb;
1893 struct musb_hdrc_platform_data *plat = dev->platform_data;
1894
1895 /* The driver might handle more features than the board; OK.
1896 * Fail when the board needs a feature that's not enabled.
1897 */
1898 if (!plat) {
1899 dev_dbg(dev, "no platform_data?\n");
1900 return -ENODEV;
1901 }
1902 switch (plat->mode) {
1903 case MUSB_HOST:
1904#ifdef CONFIG_USB_MUSB_HDRC_HCD
1905 break;
1906#else
1907 goto bad_config;
1908#endif
1909 case MUSB_PERIPHERAL:
1910#ifdef CONFIG_USB_GADGET_MUSB_HDRC
1911 break;
1912#else
1913 goto bad_config;
1914#endif
1915 case MUSB_OTG:
1916#ifdef CONFIG_USB_MUSB_OTG
1917 break;
1918#else
1919bad_config:
1920#endif
1921 default:
1922 dev_err(dev, "incompatible Kconfig role setting\n");
1923 return -EINVAL;
1924 }
1925
1926 /* allocate */
1927 musb = allocate_instance(dev, plat->config, ctrl);
1928 if (!musb)
1929 return -ENOMEM;
1930
1931 spin_lock_init(&musb->lock);
1932 musb->board_mode = plat->mode;
1933 musb->board_set_power = plat->set_power;
1934 musb->set_clock = plat->set_clock;
1935 musb->min_power = plat->min_power;
1936
1937 /* Clock usage is chip-specific ... functional clock (DaVinci,
1938 * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
1939 * code does is make sure a clock handle is available; platform
1940 * code manages it during start/stop and suspend/resume.
1941 */
1942 if (plat->clock) {
1943 musb->clock = clk_get(dev, plat->clock);
1944 if (IS_ERR(musb->clock)) {
1945 status = PTR_ERR(musb->clock);
1946 musb->clock = NULL;
1947 goto fail;
1948 }
1949 }
1950
1951 /* assume vbus is off */
1952
1953 /* platform adjusts musb->mregs and musb->isr if needed,
1954 * and activates clocks
1955 */
1956 musb->isr = generic_interrupt;
1957 status = musb_platform_init(musb);
1958
1959 if (status < 0)
1960 goto fail;
1961 if (!musb->isr) {
1962 status = -ENODEV;
1963 goto fail2;
1964 }
1965
1966#ifndef CONFIG_MUSB_PIO_ONLY
1967 if (use_dma && dev->dma_mask) {
1968 struct dma_controller *c;
1969
1970 c = dma_controller_create(musb, musb->mregs);
1971 musb->dma_controller = c;
1972 if (c)
1973 (void) c->start(c);
1974 }
1975#endif
1976 /* ideally this would be abstracted in platform setup */
1977 if (!is_dma_capable() || !musb->dma_controller)
1978 dev->dma_mask = NULL;
1979
1980 /* be sure interrupts are disabled before connecting ISR */
1981 musb_platform_disable(musb);
1982 musb_generic_disable(musb);
1983
1984 /* setup musb parts of the core (especially endpoints) */
1985 status = musb_core_init(plat->config->multipoint
1986 ? MUSB_CONTROLLER_MHDRC
1987 : MUSB_CONTROLLER_HDRC, musb);
1988 if (status < 0)
1989 goto fail2;
1990
1991 /* Init IRQ workqueue before request_irq */
1992 INIT_WORK(&musb->irq_work, musb_irq_work);
1993
1994 /* attach to the IRQ */
1995 if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
1996 dev_err(dev, "request_irq %d failed!\n", nIrq);
1997 status = -ENODEV;
1998 goto fail2;
1999 }
2000 musb->nIrq = nIrq;
2001/* FIXME this handles wakeup irqs wrong */
2002 if (enable_irq_wake(nIrq) == 0)
2003 device_init_wakeup(dev, 1);
2004
2005 pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
2006 musb_driver_name,
2007 ({char *s;
2008 switch (musb->board_mode) {
2009 case MUSB_HOST: s = "Host"; break;
2010 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2011 default: s = "OTG"; break;
2012 }; s; }),
2013 ctrl,
2014 (is_dma_capable() && musb->dma_controller)
2015 ? "DMA" : "PIO",
2016 musb->nIrq);
2017
2018#ifdef CONFIG_USB_MUSB_HDRC_HCD
2019 /* host side needs more setup, except for no-host modes */
2020 if (musb->board_mode != MUSB_PERIPHERAL) {
2021 struct usb_hcd *hcd = musb_to_hcd(musb);
2022
2023 if (musb->board_mode == MUSB_OTG)
2024 hcd->self.otg_port = 1;
2025 musb->xceiv.host = &hcd->self;
2026 hcd->power_budget = 2 * (plat->power ? : 250);
2027 }
2028#endif /* CONFIG_USB_MUSB_HDRC_HCD */
2029
2030 /* For the host-only role, we can activate right away.
2031 * (We expect the ID pin to be forcibly grounded!!)
2032 * Otherwise, wait till the gadget driver hooks up.
2033 */
2034 if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
2035 MUSB_HST_MODE(musb);
2036 musb->xceiv.default_a = 1;
2037 musb->xceiv.state = OTG_STATE_A_IDLE;
2038
2039 status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
2040
2041 DBG(1, "%s mode, status %d, devctl %02x %c\n",
2042 "HOST", status,
2043 musb_readb(musb->mregs, MUSB_DEVCTL),
2044 (musb_readb(musb->mregs, MUSB_DEVCTL)
2045 & MUSB_DEVCTL_BDEVICE
2046 ? 'B' : 'A'));
2047
2048 } else /* peripheral is enabled */ {
2049 MUSB_DEV_MODE(musb);
2050 musb->xceiv.default_a = 0;
2051 musb->xceiv.state = OTG_STATE_B_IDLE;
2052
2053 status = musb_gadget_setup(musb);
2054
2055 DBG(1, "%s mode, status %d, dev%02x\n",
2056 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
2057 status,
2058 musb_readb(musb->mregs, MUSB_DEVCTL));
2059
2060 }
2061
2062 if (status == 0)
2063 musb_debug_create("driver/musb_hdrc", musb);
2064 else {
2065fail:
2066 if (musb->clock)
2067 clk_put(musb->clock);
2068 device_init_wakeup(dev, 0);
2069 musb_free(musb);
2070 return status;
2071 }
2072
2073#ifdef CONFIG_SYSFS
2074 status = device_create_file(dev, &dev_attr_mode);
2075 status = device_create_file(dev, &dev_attr_vbus);
2076#ifdef CONFIG_USB_GADGET_MUSB_HDRC
2077 status = device_create_file(dev, &dev_attr_srp);
2078#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
2079 status = 0;
2080#endif
2081
2082 return status;
2083
2084fail2:
2085 musb_platform_exit(musb);
2086 goto fail;
2087}
2088
2089/*-------------------------------------------------------------------------*/
2090
2091/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2092 * bridge to a platform device; this driver then suffices.
2093 */
2094
#ifndef CONFIG_MUSB_PIO_ONLY
/* saved copy of dev->dma_mask, restored in musb_remove(); the init
 * path NULLs the live mask when no DMA controller is usable
 */
static u64	*orig_dma_mask;
#endif
2098
2099static int __init musb_probe(struct platform_device *pdev)
2100{
2101 struct device *dev = &pdev->dev;
2102 int irq = platform_get_irq(pdev, 0);
2103 struct resource *iomem;
2104 void __iomem *base;
2105
2106 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2107 if (!iomem || irq == 0)
2108 return -ENODEV;
2109
2110 base = ioremap(iomem->start, iomem->end - iomem->start + 1);
2111 if (!base) {
2112 dev_err(dev, "ioremap failed\n");
2113 return -ENOMEM;
2114 }
2115
2116#ifndef CONFIG_MUSB_PIO_ONLY
2117 /* clobbered by use_dma=n */
2118 orig_dma_mask = dev->dma_mask;
2119#endif
2120 return musb_init_controller(dev, irq, base);
2121}
2122
/* Platform device removal: reverse of musb_probe(). */
static int __devexit musb_remove(struct platform_device *pdev)
{
	struct musb	*musb = dev_to_musb(&pdev->dev);
	void __iomem	*ctrl_base = musb->ctrl_base;	/* musb is freed below */

	/* this gets called on rmmod.
	 * - Host mode: host may still be active
	 * - Peripheral mode: peripheral is deactivated (or never-activated)
	 * - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_shutdown(pdev);
	musb_debug_delete("driver/musb_hdrc", musb);
#ifdef CONFIG_USB_MUSB_HDRC_HCD
	if (musb->board_mode == MUSB_HOST)
		usb_remove_hcd(musb_to_hcd(musb));
#endif
	musb_free(musb);
	/* musb_free() does not unmap registers; do it here */
	iounmap(ctrl_base);
	device_init_wakeup(&pdev->dev, 0);
#ifndef CONFIG_MUSB_PIO_ONLY
	/* undo the probe-time clobber of dev->dma_mask */
	pdev->dev.dma_mask = orig_dma_mask;
#endif
	return 0;
}
2147
2148#ifdef CONFIG_PM
2149
2150static int musb_suspend(struct platform_device *pdev, pm_message_t message)
2151{
2152 unsigned long flags;
2153 struct musb *musb = dev_to_musb(&pdev->dev);
2154
2155 if (!musb->clock)
2156 return 0;
2157
2158 spin_lock_irqsave(&musb->lock, flags);
2159
2160 if (is_peripheral_active(musb)) {
2161 /* FIXME force disconnect unless we know USB will wake
2162 * the system up quickly enough to respond ...
2163 */
2164 } else if (is_host_active(musb)) {
2165 /* we know all the children are suspended; sometimes
2166 * they will even be wakeup-enabled.
2167 */
2168 }
2169
2170 if (musb->set_clock)
2171 musb->set_clock(musb->clock, 0);
2172 else
2173 clk_disable(musb->clock);
2174 spin_unlock_irqrestore(&musb->lock, flags);
2175 return 0;
2176}
2177
2178static int musb_resume(struct platform_device *pdev)
2179{
2180 unsigned long flags;
2181 struct musb *musb = dev_to_musb(&pdev->dev);
2182
2183 if (!musb->clock)
2184 return 0;
2185
2186 spin_lock_irqsave(&musb->lock, flags);
2187
2188 if (musb->set_clock)
2189 musb->set_clock(musb->clock, 1);
2190 else
2191 clk_enable(musb->clock);
2192
2193 /* for static cmos like DaVinci, register values were preserved
2194 * unless for some reason the whole soc powered down and we're
2195 * not treating that as a whole-system restart (e.g. swsusp)
2196 */
2197 spin_unlock_irqrestore(&musb->lock, flags);
2198 return 0;
2199}
2200
#else
/* without CONFIG_PM, leave the platform_driver PM hooks unset */
#define	musb_suspend	NULL
#define	musb_resume	NULL
#endif
2205
/* Generic platform driver.  Note: .probe is not set here -- it is
 * passed to platform_driver_probe() in musb_init() so musb_probe()
 * can stay in __init text.
 */
static struct platform_driver musb_driver = {
	.driver = {
		.name = (char *)musb_driver_name,
		.bus = &platform_bus_type,
		.owner = THIS_MODULE,
	},
	.remove = __devexit_p(musb_remove),
	.shutdown = musb_shutdown,
	.suspend = musb_suspend,
	.resume = musb_resume,
};
2217
2218/*-------------------------------------------------------------------------*/
2219
/* Module/subsystem init: announce the compile-time configuration (DMA
 * flavor and role support are preprocessor choices pasted into the
 * version string below) and register the platform driver.
 */
static int __init musb_init(void)
{
#ifdef CONFIG_USB_MUSB_HDRC_HCD
	if (usb_disabled())
		return 0;
#endif

	pr_info("%s: version " MUSB_VERSION ", "
#ifdef CONFIG_MUSB_PIO_ONLY
		"pio"
#elif defined(CONFIG_USB_TI_CPPI_DMA)
		"cppi-dma"
#elif defined(CONFIG_USB_INVENTRA_DMA)
		"musb-dma"
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		"tusb-omap-dma"
#else
		"?dma?"
#endif
		", "
#ifdef CONFIG_USB_MUSB_OTG
		"otg (peripheral+host)"
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
		"peripheral"
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
		"host"
#endif
		", debug=%d\n",
		musb_driver_name, debug);
	return platform_driver_probe(&musb_driver, musb_probe);
}
2251
/* make us init after usbcore and before usb
 * gadget and host-side drivers start to register
 * (subsys initcalls run before ordinary device/module initcalls)
 */
subsys_initcall(musb_init);
2256
/* Module unload: unregistering the driver tears down any remaining
 * bound controller instances via musb_remove().
 */
static void __exit musb_cleanup(void)
{
	platform_driver_unregister(&musb_driver);
}
module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 000000000000..eade46d81708
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,507 @@
1/*
2 * MUSB OTG driver defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_CORE_H__
36#define __MUSB_CORE_H__
37
38#include <linux/slab.h>
39#include <linux/list.h>
40#include <linux/interrupt.h>
41#include <linux/smp_lock.h>
42#include <linux/errno.h>
43#include <linux/clk.h>
44#include <linux/device.h>
45#include <linux/usb/ch9.h>
46#include <linux/usb/gadget.h>
47#include <linux/usb.h>
48#include <linux/usb/otg.h>
49#include <linux/usb/musb.h>
50
51struct musb;
52struct musb_hw_ep;
53struct musb_ep;
54
55
56#include "musb_debug.h"
57#include "musb_dma.h"
58
59#include "musb_io.h"
60#include "musb_regs.h"
61
62#include "musb_gadget.h"
63#include "../core/hcd.h"
64#include "musb_host.h"
65
66
67
#ifdef CONFIG_USB_MUSB_OTG

/* OTG builds: role support is decided per-board at runtime */
#define	is_peripheral_enabled(musb)	((musb)->board_mode != MUSB_HOST)
#define	is_host_enabled(musb)		((musb)->board_mode != MUSB_PERIPHERAL)
#define	is_otg_enabled(musb)		((musb)->board_mode == MUSB_OTG)

/* NOTE: otg and peripheral-only state machines start at B_IDLE.
 * OTG or host-only go to A_IDLE when ID is sensed.
 */
#define is_peripheral_active(m)		(!(m)->is_host)
#define is_host_active(m)		((m)->is_host)

#else
/* single-role builds: the answers are compile-time constants */
#define	is_peripheral_enabled(musb)	is_peripheral_capable()
#define	is_host_enabled(musb)		is_host_capable()
#define	is_otg_enabled(musb)		0

#define	is_peripheral_active(musb)	is_peripheral_capable()
#define	is_host_active(musb)		is_host_capable()
#endif

#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
 * override that choice selection (often USB_GADGET_DUMMY_HCD).
 */
#ifndef CONFIG_USB_GADGET_MUSB_HDRC
#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
#endif
#endif	/* need MUSB gadget selection */


#ifdef CONFIG_PROC_FS
#include <linux/fs.h>
#define MUSB_CONFIG_PROC_FS
#endif
103
/****************************** PERIPHERAL ROLE *****************************/

#ifdef CONFIG_USB_GADGET_MUSB_HDRC

#define	is_peripheral_capable()	(1)

/* gadget-side entry points called from the core irq/fifo paths */
extern irqreturn_t musb_g_ep0_irq(struct musb *);
extern void musb_g_tx(struct musb *, u8);
extern void musb_g_rx(struct musb *, u8);
extern void musb_g_reset(struct musb *);
extern void musb_g_suspend(struct musb *);
extern void musb_g_resume(struct musb *);
extern void musb_g_wakeup(struct musb *);
extern void musb_g_disconnect(struct musb *);

#else

#define	is_peripheral_capable()	(0)

/* inline no-op stubs for builds without gadget support; note there
 * are intentionally no stubs for musb_g_tx()/musb_g_rx()
 */
static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
static inline void musb_g_reset(struct musb *m) {}
static inline void musb_g_suspend(struct musb *m) {}
static inline void musb_g_resume(struct musb *m) {}
static inline void musb_g_wakeup(struct musb *m) {}
static inline void musb_g_disconnect(struct musb *m) {}

#endif
131
/****************************** HOST ROLE ***********************************/

#ifdef CONFIG_USB_MUSB_HDRC_HCD

#define	is_host_capable()	(1)

/* host-side entry points called from the core irq/fifo paths */
extern irqreturn_t musb_h_ep0_irq(struct musb *);
extern void musb_host_tx(struct musb *, u8);
extern void musb_host_rx(struct musb *, u8);

#else

#define	is_host_capable()	(0)

/* inline no-op stubs for builds without host support */
static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
static inline void musb_host_tx(struct musb *m, u8 e) {}
static inline void musb_host_rx(struct musb *m, u8 e) {}

#endif
151
152
/****************************** CONSTANTS ********************************/

/* may be predefined (e.g. by platform headers) before this point */
#ifndef MUSB_C_NUM_EPS
#define MUSB_C_NUM_EPS ((u8)16)
#endif

#ifndef MUSB_MAX_END0_PACKET
#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
#endif

/* host side ep0 states */
enum musb_h_ep0_state {
	MUSB_EP0_IDLE,
	MUSB_EP0_START, /* expect ack of setup */
	MUSB_EP0_IN, /* expect IN DATA */
	MUSB_EP0_OUT, /* expect ack of OUT DATA */
	MUSB_EP0_STATUS, /* expect ack of STATUS */
} __attribute__ ((packed));

/* peripheral side ep0 states */
enum musb_g_ep0_state {
	MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
	MUSB_EP0_STAGE_TX, /* IN data */
	MUSB_EP0_STAGE_RX, /* OUT data */
	MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
	MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
	MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
} __attribute__ ((packed));

/* OTG protocol constants */
#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
186
/*************************** REGISTER ACCESS ********************************/

/* Endpoint registers (other than dynfifo setup) can be accessed either
 * directly with the "flat" model, or after setting up an index register.
 * NOTE: in the indexed model, musb_ep_select() must run before touching
 * an endpoint's registers -- it writes the INDEX bank-select register.
 */

#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
	|| defined(CONFIG_ARCH_OMAP3430)
/* REVISIT indexed access seemed to
 * misbehave (on DaVinci) for at least peripheral IN ...
 */
#define MUSB_FLAT_REG
#endif

/* TUSB mapping: "flat" plus ep0 special cases */
#if defined(CONFIG_USB_TUSB6010)
#define musb_ep_select(_mbase, _epnum) \
	musb_writeb((_mbase), MUSB_INDEX, (_epnum))
#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET

/* "flat" mapping: each endpoint has its own i/o address */
#elif defined(MUSB_FLAT_REG)
#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET

/* "indexed" mapping: INDEX register controls register bank select */
#else
#define musb_ep_select(_mbase, _epnum) \
	musb_writeb((_mbase), MUSB_INDEX, (_epnum))
#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
#endif
218
/****************************** FUNCTIONS ********************************/

/* Role bookkeeping.  Wrapped in do { } while (0) so each expands to a
 * single statement and is safe in an unbraced if/else arm (the old
 * bare-brace form broke "if (x) MUSB_HST_MODE(m); else ...").
 */
#define MUSB_HST_MODE(_musb) \
	do { (_musb)->is_host = true; } while (0)
#define MUSB_DEV_MODE(_musb) \
	do { (_musb)->is_host = false; } while (0)

/* nonzero when the DEVCTL register reports host mode */
#define test_devctl_hst_mode(_x) \
	(musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)

/* human-readable current role, driven by the is_host flag */
#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
230
/******************************** TYPES *************************************/

/*
 * struct musb_hw_ep - endpoint hardware (bidirectional)
 *
 * Ordered slightly for better cacheline locality.
 */
struct musb_hw_ep {
	struct musb		*musb;	/* back-pointer, set in allocate_instance() */
	void __iomem		*fifo;
	void __iomem		*regs;

#ifdef CONFIG_USB_TUSB6010
	void __iomem		*conf;
#endif

	/* index in musb->endpoints[] */
	u8			epnum;

	/* hardware configuration, possibly dynamic */
	bool			is_shared_fifo;
	bool			tx_double_buffered;
	bool			rx_double_buffered;
	u16			max_packet_sz_tx;
	u16			max_packet_sz_rx;

	struct dma_channel	*tx_channel;
	struct dma_channel	*rx_channel;

#ifdef CONFIG_USB_TUSB6010
	/* TUSB has "asynchronous" and "synchronous" dma modes */
	dma_addr_t		fifo_async;
	dma_addr_t		fifo_sync;
	void __iomem		*fifo_sync_va;
#endif

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	void __iomem		*target_regs;

	/* currently scheduled peripheral endpoint */
	struct musb_qh		*in_qh;
	struct musb_qh		*out_qh;

	u8			rx_reinit;
	u8			tx_reinit;
#endif

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	/* peripheral side */
	struct musb_ep		ep_in;		/* TX */
	struct musb_ep		ep_out;		/* RX */
#endif
};
284
/* Next queued gadget request on this endpoint's TX (ep_in) side, or
 * NULL in builds without peripheral support.
 */
static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
{
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	return next_request(&hw_ep->ep_in);
#else
	return NULL;
#endif
}
293
/* Next queued gadget request on this endpoint's RX (ep_out) side, or
 * NULL in builds without peripheral support.
 */
static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
{
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	return next_request(&hw_ep->ep_out);
#else
	return NULL;
#endif
}
302
303/*
304 * struct musb - Driver instance data.
305 */
306struct musb {
307 /* device lock */
308 spinlock_t lock;
309 struct clk *clock;
310 irqreturn_t (*isr)(int, void *);
311 struct work_struct irq_work;
312
313/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
314#define MUSB_PORT_STAT_RESUME (1 << 31)
315
316 u32 port1_status;
317
318#ifdef CONFIG_USB_MUSB_HDRC_HCD
319 unsigned long rh_timer;
320
321 enum musb_h_ep0_state ep0_stage;
322
323 /* bulk traffic normally dedicates endpoint hardware, and each
324 * direction has its own ring of host side endpoints.
325 * we try to progress the transfer at the head of each endpoint's
326 * queue until it completes or NAKs too much; then we try the next
327 * endpoint.
328 */
329 struct musb_hw_ep *bulk_ep;
330
331 struct list_head control; /* of musb_qh */
332 struct list_head in_bulk; /* of musb_qh */
333 struct list_head out_bulk; /* of musb_qh */
334 struct musb_qh *periodic[32]; /* tree of interrupt+iso */
335#endif
336
337 /* called with IRQs blocked; ON/nonzero implies starting a session,
338 * and waiting at least a_wait_vrise_tmout.
339 */
340 void (*board_set_vbus)(struct musb *, int is_on);
341
342 struct dma_controller *dma_controller;
343
344 struct device *controller;
345 void __iomem *ctrl_base;
346 void __iomem *mregs;
347
348#ifdef CONFIG_USB_TUSB6010
349 dma_addr_t async;
350 dma_addr_t sync;
351 void __iomem *sync_va;
352#endif
353
354 /* passed down from chip/board specific irq handlers */
355 u8 int_usb;
356 u16 int_rx;
357 u16 int_tx;
358
359 struct otg_transceiver xceiv;
360
361 int nIrq;
362
363 struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
364#define control_ep endpoints
365
366#define VBUSERR_RETRY_COUNT 3
367 u16 vbuserr_retry;
368 u16 epmask;
369 u8 nr_endpoints;
370
371 u8 board_mode; /* enum musb_mode */
372 int (*board_set_power)(int state);
373
374 int (*set_clock)(struct clk *clk, int is_active);
375
376 u8 min_power; /* vbus for periph, in mA/2 */
377
378 bool is_host;
379
380 int a_wait_bcon; /* VBUS timeout in msecs */
381 unsigned long idle_timeout; /* Next timeout in jiffies */
382
383 /* active means connected and not suspended */
384 unsigned is_active:1;
385
386 unsigned is_multipoint:1;
387 unsigned ignore_disconnect:1; /* during bus resets */
388
389#ifdef C_MP_TX
390 unsigned bulk_split:1;
391#define can_bulk_split(musb,type) \
392 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
393#else
394#define can_bulk_split(musb, type) 0
395#endif
396
397#ifdef C_MP_RX
398 unsigned bulk_combine:1;
399#define can_bulk_combine(musb,type) \
400 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
401#else
402#define can_bulk_combine(musb, type) 0
403#endif
404
405#ifdef CONFIG_USB_GADGET_MUSB_HDRC
406 /* is_suspended means USB B_PERIPHERAL suspend */
407 unsigned is_suspended:1;
408
409 /* may_wakeup means remote wakeup is enabled */
410 unsigned may_wakeup:1;
411
412 /* is_self_powered is reported in device status and the
413 * config descriptor. is_bus_powered means B_PERIPHERAL
414 * draws some VBUS current; both can be true.
415 */
416 unsigned is_self_powered:1;
417 unsigned is_bus_powered:1;
418
419 unsigned set_address:1;
420 unsigned test_mode:1;
421 unsigned softconnect:1;
422
423 u8 address;
424 u8 test_mode_nr;
425 u16 ackpend; /* ep0 */
426 enum musb_g_ep0_state ep0_state;
427 struct usb_gadget g; /* the gadget */
428 struct usb_gadget_driver *gadget_driver; /* its driver */
429#endif
430
431 struct musb_hdrc_config *config;
432
433#ifdef MUSB_CONFIG_PROC_FS
434 struct proc_dir_entry *proc_entry;
435#endif
436};
437
/* Switch VBUS on/off via the board-specific hook.
 * NOTE(review): board_set_vbus is called unchecked -- confirm every
 * glue layer installs it before this can run.
 */
static inline void musb_set_vbus(struct musb *musb, int is_on)
{
	musb->board_set_vbus(musb, is_on);
}
442
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* Map the embedded usb_gadget back to its containing struct musb. */
static inline struct musb *gadget_to_musb(struct usb_gadget *g)
{
	return container_of(g, struct musb, g);
}
#endif
449
450
/***************************** Glue it together *****************************/

extern const char musb_driver_name[];

/* core start/stop */
extern void musb_start(struct musb *musb);
extern void musb_stop(struct musb *musb);

/* PIO fifo access */
extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);

extern void musb_load_testpacket(struct musb *);

extern irqreturn_t musb_interrupt(struct musb *);

/* implemented by the platform glue layers */
extern void musb_platform_enable(struct musb *musb);
extern void musb_platform_disable(struct musb *musb);

extern void musb_hnp_stop(struct musb *musb);

extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);

/* idle hook only exists on platforms that can use it */
#if defined(CONFIG_USB_TUSB6010) || \
	defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
#else
#define musb_platform_try_idle(x, y)		do {} while (0)
#endif

#ifdef CONFIG_USB_TUSB6010
extern int musb_platform_get_vbus_status(struct musb *musb);
#else
#define musb_platform_get_vbus_status(x)	0
#endif

extern int __init musb_platform_init(struct musb *musb);
extern int musb_platform_exit(struct musb *musb);

/*-------------------------- ProcFS definitions ---------------------*/

struct proc_dir_entry;

#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
extern void musb_debug_delete(char *name, struct musb *data);

#else
/* debug/procfs disabled: both calls become harmless no-ops */
static inline struct proc_dir_entry *
musb_debug_create(char *name, struct musb *data)
{
	return NULL;
}
static inline void musb_debug_delete(char *name, struct musb *data)
{
}
#endif

#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 000000000000..3bdb311e820d
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,66 @@
1/*
2 * MUSB OTG driver debug defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
#ifndef __MUSB_LINUX_DEBUG_H__
#define __MUSB_LINUX_DEBUG_H__

/* unconditional printk with function name and line number prefixed */
#define yprintk(facility, format, args...) \
	do { printk(facility "%s %d: " format , \
	__func__, __LINE__ , ## args); } while (0)
#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)

/* printk gated on the runtime debug verbosity level */
#define xprintk(level, facility, format, args...) do { \
	if (_dbg_level(level)) { \
		printk(facility "%s %d: " format , \
				__func__, __LINE__ , ## args); \
	} } while (0)

#if MUSB_DEBUG > 0
extern unsigned debug;	/* runtime verbosity; defined elsewhere */
#else
#define debug 0		/* debug compiled out: DBG() never fires */
#endif

/* true when messages at verbosity 'l' should be emitted */
static inline int _dbg_level(unsigned l)
{
	return debug >= l;
}

#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)

extern const char *otg_state_string(struct musb *);

#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 000000000000..0a2c4e3602c1
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
1/*
2 * MUSB OTG driver DMA controller abstraction
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_DMA_H__
36#define __MUSB_DMA_H__
37
38struct musb_hw_ep;
39
40/*
41 * DMA Controller Abstraction
42 *
43 * DMA Controllers are abstracted to allow use of a variety of different
44 * implementations of DMA, as allowed by the Inventra USB cores. On the
45 * host side, usbcore sets up the DMA mappings and flushes caches; on the
46 * peripheral side, the gadget controller driver does. Responsibilities
47 * of a DMA controller driver include:
48 *
49 * - Handling the details of moving multiple USB packets
50 * in cooperation with the Inventra USB core, including especially
51 * the correct RX side treatment of short packets and buffer-full
52 * states (both of which terminate transfers).
53 *
54 * - Knowing the correlation between dma channels and the
55 * Inventra core's local endpoint resources and data direction.
56 *
57 * - Maintaining a list of allocated/available channels.
58 *
59 * - Updating channel status on interrupts,
60 * whether shared with the Inventra core or separate.
61 */
62
/* Sentinel in a request's ->dma field meaning "no DMA mapping set up". */
#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

/* Compile-time switch: PIO-only builds constant-fold away all DMA paths. */
#ifndef CONFIG_MUSB_PIO_ONLY
#define	is_dma_capable()	(1)
#else
#define	is_dma_capable()	(0)
#endif

/* True when the TI CPPI DMA engine backs this controller. */
#ifdef CONFIG_USB_TI_CPPI_DMA
#define	is_cppi_enabled()	1
#else
#define	is_cppi_enabled()	0
#endif

/* True when the TUSB 6010's OMAP system-DMA glue is in use. */
#ifdef CONFIG_USB_TUSB_OMAP_DMA
#define tusb_dma_omap()			1
#else
#define tusb_dma_omap()			0
#endif
82
/*
 * DMA channel status ... updated by the dma controller driver whenever that
 * status changes, and protected by the overall controller spinlock.  The
 * ordering matters: only BUSY means hardware may still be moving data.
 */
enum dma_channel_status {
	/* unallocated */
	MUSB_DMA_STATUS_UNKNOWN,
	/* allocated ... but not busy, no errors */
	MUSB_DMA_STATUS_FREE,
	/* busy ... transactions are active */
	MUSB_DMA_STATUS_BUSY,
	/* transaction(s) aborted due to ... dma or memory bus error */
	MUSB_DMA_STATUS_BUS_ABORT,
	/* transaction(s) aborted due to ... core error or USB fault */
	MUSB_DMA_STATUS_CORE_ABORT
};
99
100struct dma_controller;
101
/**
 * struct dma_channel - A DMA channel.
 * @private_data: channel-private data (owned by the controller driver)
 * @max_len: the maximum number of bytes the channel can move in one
 *	transaction (typically representing many USB maximum-sized packets)
 * @actual_len: how many bytes have been transferred
 * @status: current channel status (updated e.g. on interrupt)
 * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
 *
 * channels are associated with an endpoint for the duration of at least
 * one usb transfer.
 */
struct dma_channel {
	void			*private_data;
	/* FIXME not void* private_data, but a dma_controller * */
	size_t			max_len;
	size_t			actual_len;
	enum dma_channel_status	status;
	bool			desired_mode;
};
122
123/*
124 * dma_channel_status - return status of dma channel
125 * @c: the channel
126 *
127 * Returns the software's view of the channel status. If that status is BUSY
128 * then it's possible that the hardware has completed (or aborted) a transfer,
129 * so the driver needs to update that status.
130 */
131static inline enum dma_channel_status
132dma_channel_status(struct dma_channel *c)
133{
134 return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
135}
136
/**
 * struct dma_controller - A DMA Controller.
 * @start: call this to start a DMA controller;
 *	return 0 on success, else negative errno
 * @stop: call this to stop a DMA controller
 *	return 0 on success, else negative errno
 * @channel_alloc: call this to allocate a DMA channel
 * @channel_release: call this to release a DMA channel
 * @channel_program: call this to arm an allocated channel for one
 *	transfer of up to @length bytes at @dma_addr; nonzero return
 *	means the channel accepted the transfer (callers fall back to
 *	PIO when it returns zero)
 * @channel_abort: call this to abort a pending DMA transaction,
 *	returning it to FREE (but allocated) state
 *
 * Controllers manage dma channels.
 */
struct dma_controller {
	int			(*start)(struct dma_controller *);
	int			(*stop)(struct dma_controller *);
	struct dma_channel	*(*channel_alloc)(struct dma_controller *,
					struct musb_hw_ep *, u8 is_tx);
	void			(*channel_release)(struct dma_channel *);
	int			(*channel_program)(struct dma_channel *channel,
							u16 maxpacket, u8 mode,
							dma_addr_t dma_addr,
							u32 length);
	int			(*channel_abort)(struct dma_channel *);
};
162
/* called after channel_program(), may indicate a fault */
extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);


/* Instantiate the platform's DMA controller during init.
 * NOTE(review): callers appear to treat a NULL result as "use PIO only" —
 * confirm against the per-platform implementations.
 */
extern struct dma_controller *__init
dma_controller_create(struct musb *, void __iomem *);

/* Tear down a controller created by dma_controller_create(). */
extern void dma_controller_destroy(struct dma_controller *);
171
172#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 000000000000..d6a802c224fa
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
1/*
2 * MUSB OTG driver peripheral support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/timer.h>
38#include <linux/module.h>
39#include <linux/smp.h>
40#include <linux/spinlock.h>
41#include <linux/delay.h>
42#include <linux/moduleparam.h>
43#include <linux/stat.h>
44#include <linux/dma-mapping.h>
45
46#include "musb_core.h"
47
48
49/* MUSB PERIPHERAL status 3-mar-2006:
50 *
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
52 * Minor glitches:
53 *
54 * + remote wakeup to Linux hosts work, but saw USBCV failures;
55 * in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly
58 * clearing SENDSTALL?
59 *
60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not
63 * required.
64 *
65 * - TX/IN
 *	+ both pio and dma behave well with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost?
71 *
72 * - RX/OUT
 *	+ both pio and dma behave well with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs
78 *
79 * - ISO not tested ... might work, but only weakly isochronous
80 *
81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.)
84 *
85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works
89 */
90
91/* ----------------------------------------------------------------------- */
92
/*
 * Immediately complete a request: unlink it, finalize its DMA mapping,
 * and invoke the gadget driver's completion callback.
 *
 * @param ep the endpoint the request was queued on
 * @param request the request to complete
 * @param status the status to complete the request with (kept only if the
 *	request is still -EINPROGRESS)
 * Context: controller locked, IRQs blocked.  The lock is dropped (and
 * re-acquired) around the completion callback, which may queue new
 * requests.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&request->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/* mark the endpoint busy so code running while the lock is dropped
	 * (e.g. a requeue from the completion callback) won't restart I/O;
	 * the saved value is restored below.
	 */
	ep->busy = 1;
	spin_unlock(&musb->lock);
	/* hand the buffer back to the CPU before the callback touches it */
	if (is_dma_capable()) {
		if (req->mapped) {
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			/* caller-supplied mapping: sync only, don't unmap */
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->tx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (request->status == 0)
		DBG(5, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
151
152/* ----------------------------------------------------------------------- */
153
/*
 * Abort requests queued to an endpoint using the status.  Synchronous:
 * aborts any active DMA, flushes the FIFO, then gives back every queued
 * request with @status.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;
		/* FLUSHFIFO is written twice on purpose — presumably to
		 * drain both halves of a double-buffered FIFO; confirm
		 * against the core's programming guide.
		 */
		if (ep->is_in) {
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	/* giveback unlinks each request, so the list drains */
	while (!list_empty(&(ep->req_list))) {
		req = container_of(ep->req_list.next, struct musb_request,
				request.list);
		musb_g_giveback(ep, &req->request, status);
	}
}
192
193/* ----------------------------------------------------------------------- */
194
195/* Data transfers - pure PIO, pure DMA, or mixed mode */
196
197/*
198 * This assumes the separate CPPI engine is responding to DMA requests
199 * from the usb core ... sequenced a bit differently from mentor dma.
200 */
201
202static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
203{
204 if (can_bulk_split(musb, ep->type))
205 return ep->hw_ep->max_packet_sz_tx;
206 else
207 return ep->packet_sz;
208}
209
210
211#ifdef CONFIG_USB_INVENTRA_DMA
212
213/* Peripheral tx (IN) using Mentor DMA works as follows:
214 Only mode 0 is used for transfers <= wPktSize,
215 mode 1 is used for larger transfers,
216
217 One of the following happens:
218 - Host sends IN token which causes an endpoint interrupt
219 -> TxAvail
220 -> if DMA is currently busy, exit.
221 -> if queue is non-empty, txstate().
222
223 - Request is queued by the gadget driver.
224 -> if queue was previously empty, txstate()
225
226 txstate()
227 -> start
228 /\ -> setup DMA
229 | (data is transferred to the FIFO, then sent out when
230 | IN token(s) are recd from Host.
231 | -> DMA interrupt on completion
232 | calls TxAvail.
233 | -> stop DMA, ~DmaEenab,
234 | -> set TxPktRdy for last short pkt or zlp
235 | -> Complete Request
236 | -> Continue next request (call txstate)
237 |___________________________________|
238
239 * Non-Mentor DMA engines can of course work differently, such as by
240 * upleveling from irq-per-packet to irq-per-buffer.
241 */
242
243#endif
244
/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Picks DMA when a mapping exists and the backend accepts the transfer,
 * otherwise falls back to PIO for one FIFO's worth of data.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	/* can't load the FIFO while the previous packet is still pending */
	if (csr & MUSB_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready , txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	/* don't start I/O on a halted endpoint */
	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		/* DMA is only usable when the request carries a mapping */
		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min(request->length,
						musb_ep->dma->max_len);
			/* mode 0 for single packets, mode 1 for larger */
			if (request_size <= musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/* ASSERT: DMAENAB is clear */
					csr &= ~(MUSB_TXCSR_AUTOSET |
							MUSB_TXCSR_DMAMODE);
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else
					csr |= (MUSB_TXCSR_AUTOSET
							| MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);

				csr &= ~MUSB_TXCSR_P_UNDERRUN;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_P_UNDERRUN
				| MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma,
				request->length);
		if (!use_dma) {
			/* channel refused the transfer: drop to PIO */
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			/* ASSERT: DMAENAB clear */
			csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
			/* invariant: prequest->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma,
				request->length);
#endif
	}
#endif

	/* PIO path: copy one FIFO's worth and mark the packet ready */
	if (!use_dma) {
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
398
/*
 * TX endpoint interrupt handler: FIFO state update (e.g. data ready).
 * Handles sent-stall and underrun conditions, completes a finished
 * request (possibly after a terminating short/zero-length packet), and
 * kickstarts the next queued request via txstate().
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;
	/* do/while(0) gives the handler a single "break"-able exit path */
	do {
		/* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (csr & MUSB_TXCSR_P_SENTSTALL) {
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~MUSB_TXCSR_P_SENTSTALL;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* a stall aborts any in-flight DMA */
			if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
				dma->status = MUSB_DMA_STATUS_CORE_ABORT;
				musb->dma_controller->channel_abort(dma);
			}

			if (request)
				musb_g_giveback(musb_ep, request, -EPIPE);

			break;
		}

		if (csr & MUSB_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_P_UNDERRUN
					| MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR, csr);
			DBG(20, "underrun on ep%d, req %p\n", epnum, request);
		}

		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
			break;
		}

		if (request) {
			u8	is_dma = 0;

			if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
				/* DMA just finished: turn it off and account
				 * for the bytes it moved */
				is_dma = 1;
				csr |= MUSB_TXCSR_P_WZC_BITS;
				csr &= ~(MUSB_TXCSR_DMAENAB
						| MUSB_TXCSR_P_UNDERRUN
						| MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR, csr);
				/* ensure writebuffer is empty */
				csr = musb_readw(epio, MUSB_TXCSR);
				request->actual += musb_ep->dma->actual_len;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %zu, req %p\n",
					epnum, csr,
					musb_ep->dma->actual_len,
					request);
			}

			if (is_dma || request->actual == request->length) {

				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((request->zero
						&& request->length
						&& (request->length
							% musb_ep->packet_sz)
							== 0)
#ifdef CONFIG_USB_INVENTRA_DMA
					|| (is_dma &&
						((!dma->desired_mode) ||
						    (request->actual &
						    (musb_ep->packet_sz - 1))))
#endif
				) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (csr & MUSB_TXCSR_TXPKTRDY)
						break;

					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MUSB_TXCSR,
							MUSB_TXCSR_MODE
							| MUSB_TXCSR_TXPKTRDY);
					request->zero = 0;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(musb_ep, request, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				musb_ep_select(mbase, epnum);
				csr = musb_readw(epio, MUSB_TXCSR);
				if (csr & MUSB_TXCSR_FIFONOTEMPTY)
					break;
				/* the giveback may have disabled the ep */
				request = musb_ep->desc
						? next_request(musb_ep)
						: NULL;
				if (!request) {
					DBG(4, "%s idle now\n",
						musb_ep->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(request));
		}

	} while (0);
}
534
535/* ------------------------------------------------------------ */
536
537#ifdef CONFIG_USB_INVENTRA_DMA
538
539/* Peripheral rx (OUT) using Mentor DMA works as follows:
540 - Only mode 0 is used.
541
542 - Request is queued by the gadget class driver.
543 -> if queue was previously empty, rxstate()
544
545 - Host sends OUT token which causes an endpoint interrupt
546 /\ -> RxReady
547 | -> if request queued, call rxstate
548 | /\ -> setup DMA
549 | | -> DMA interrupt on completion
550 | | -> RxReady
551 | | -> stop DMA
552 | | -> ack the read
553 | | -> if data recd = max expected
554 | | by the request, or host
555 | | sent a short packet,
556 | | complete the request,
557 | | and start the next one.
558 | |_____________________________________|
559 | else just wait for the host
560 | to send the next OUT token.
561 |__________________________________________________|
562
563 * Non-Mentor DMA engines can of course work differently.
564 */
565
566#endif
567
/*
 * Fill the current RX request from the endpoint, by CPPI DMA, Mentor DMA,
 * TUSB OMAP DMA, or PIO, and give it back when it completes or a short
 * packet ends the transfer.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	u16			csr = 0;
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	u16			fifo_count = 0;
	/* defaults to packet_sz so the short-packet test below is false
	 * unless RXCOUNT overwrites it */
	u16			len = musb_ep->packet_sz;

	csr = musb_readw(epio, MUSB_RXCSR);

	if (is_cppi_enabled() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);
		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_dma_capable() && musb_ep->dma) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of the transfer is signified either by a
	 * short packet, or filling the last byte of the buffer.  (Sending
	 * extra data in that last packet should trigger an overflow fault.)
	 * But in mode 1, we don't get DMA completion interrupt for short
	 * packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				csr |= MUSB_RXCSR_DMAENAB;
#ifdef USE_MODE1
				csr |= MUSB_RXCSR_AUTOCLEAR;
				/* csr |= MUSB_RXCSR_DMAMODE; */

				/* this special sequence (enabling and then
				 * disabling MUSB_RXCSR_DMAMODE) is required
				 * to get DMAReq to activate
				 */
				musb_writew(epio, MUSB_RXCSR,
					csr | MUSB_RXCSR_DMAMODE);
#endif
				musb_writew(epio, MUSB_RXCSR, csr);

				if (request->actual < request->length) {
					int transfer_size = 0;
#ifdef USE_MODE1
					transfer_size = min(request->length,
							channel->max_len);
#else
					transfer_size = len;
#endif
					if (transfer_size <= musb_ep->packet_sz)
						musb_ep->dma->desired_mode = 0;
					else
						musb_ep->dma->desired_mode = 1;

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#endif	/* Mentor's DMA */

			/* PIO (or TUSB OMAP DMA) fallback */
			fifo_count = request->length - request->actual;
			DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min(len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && musb_ep->dma) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
728
/*
 * RX endpoint interrupt handler: data ready for a request; called from IRQ.
 * Handles sent-stall, ISO overrun, and incomplete-RX conditions, finishes
 * a completed DMA transfer, then pushes more data via rxstate().
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_out;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		/* a stall aborts any in-flight DMA; keep the bytes it moved */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			request->actual += musb_ep->dma->actual_len;
		}

		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);

		if (request)
			musb_g_giveback(musb_ep, request, -EPIPE);
		goto done;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request && request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
			"%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		goto done;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		/* DMA just finished: turn it off and account for its bytes */
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz))
			goto done;
#endif
		musb_g_giveback(musb_ep, request, 0);

		request = next_request(musb_ep);
		if (!request)
			goto done;

		/* don't start more i/o till the stall clears */
		musb_ep_select(mbase, epnum);
		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_P_SENDSTALL)
			goto done;
	}


	/* analyze request if the ep is hot */
	if (request)
		rxstate(musb, to_musb_request(request));
	else
		DBG(3, "packet waiting for %s%s request\n",
				musb_ep->desc ? "" : "inactive ",
				musb_ep->end_point.name);

done:
	return;
}
844
845/* ------------------------------------------------------------ */
846
/*
 * usb_ep_ops.enable: configure an endpoint from its descriptor and arm it.
 *
 * Validates direction and wMaxPacketSize against the hardware endpoint,
 * unmasks the endpoint interrupt, programs MAXP and the CSR mode/toggle
 * bits, and tries to grab a DMA channel (I/O still works without one).
 *
 * Returns 0 on success, -EBUSY if already enabled, -EINVAL on any
 * descriptor/hardware mismatch.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	/* non-NULL desc doubles as the "enabled" flag (see disable/queue) */
	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	/* check direction and (later) maxpacket size against endpoint */
	if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
	musb_ep->packet_sz = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (desc->bEndpointAddress & USB_DIR_IN) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		/* a shared fifo takes whatever direction is enabled now */
		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_tx)
			goto fail;

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MUSB_TXMAXP, tmp);

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;
		if (tmp > hw_ep->max_packet_sz_rx)
			goto fail;

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MUSB_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	/* setting desc last marks the endpoint live for queue() */
	musb_ep->desc = desc;
	musb_ep->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
991
992/*
993 * Disable an endpoint flushing all requests queued.
994 */
/*
 * Disable an endpoint flushing all requests queued.
 *
 * usb_ep_ops.disable: masks the endpoint interrupt, zeroes MAXP, marks
 * the endpoint down (desc = NULL), then aborts everything still queued
 * with -ESHUTDOWN.  Always returns 0.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* mark the endpoint down before nuking so no new I/O starts */
	musb_ep->desc = NULL;

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	DBG(2, "%s\n", musb_ep->end_point.name);

	return status;
}
1038
1039/*
1040 * Allocate a request for an endpoint.
1041 * Reused by ep0 code.
1042 */
1043struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1044{
1045 struct musb_ep *musb_ep = to_musb_ep(ep);
1046 struct musb_request *request = NULL;
1047
1048 request = kzalloc(sizeof *request, gfp_flags);
1049 if (request) {
1050 INIT_LIST_HEAD(&request->request.list);
1051 request->request.dma = DMA_ADDR_INVALID;
1052 request->epnum = musb_ep->current_epnum;
1053 request->ep = musb_ep;
1054 }
1055
1056 return &request->request;
1057}
1058
1059/*
1060 * Free a request
1061 * Reused by ep0 code.
1062 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	/* Recover the containing musb_request and release it. */
	struct musb_request *request = to_musb_request(req);

	kfree(request);
}
1067
/* NOTE(review): neither `buffers` nor struct free_record is referenced in
 * this part of the file — presumably leftovers from a removed buffer
 * allocator; confirm before deleting.
 */
static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;	/* size of the mapping */
	dma_addr_t		dma;	/* bus address of the mapping */
};
1076
1077/*
1078 * Context: controller locked, IRQs blocked.
1079 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	/* Kick the hardware for the request at the head of an endpoint
	 * queue; req->tx picks the TX or RX state machine.
	 */
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	/* select this endpoint's register bank before touching CSRs */
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}
1092
1093static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1094 gfp_t gfp_flags)
1095{
1096 struct musb_ep *musb_ep;
1097 struct musb_request *request;
1098 struct musb *musb;
1099 int status = 0;
1100 unsigned long lockflags;
1101
1102 if (!ep || !req)
1103 return -EINVAL;
1104 if (!req->buf)
1105 return -ENODATA;
1106
1107 musb_ep = to_musb_ep(ep);
1108 musb = musb_ep->musb;
1109
1110 request = to_musb_request(req);
1111 request->musb = musb;
1112
1113 if (request->ep != musb_ep)
1114 return -EINVAL;
1115
1116 DBG(4, "<== to %s request=%p\n", ep->name, req);
1117
1118 /* request is mine now... */
1119 request->request.actual = 0;
1120 request->request.status = -EINPROGRESS;
1121 request->epnum = musb_ep->current_epnum;
1122 request->tx = musb_ep->is_in;
1123
1124 if (is_dma_capable() && musb_ep->dma) {
1125 if (request->request.dma == DMA_ADDR_INVALID) {
1126 request->request.dma = dma_map_single(
1127 musb->controller,
1128 request->request.buf,
1129 request->request.length,
1130 request->tx
1131 ? DMA_TO_DEVICE
1132 : DMA_FROM_DEVICE);
1133 request->mapped = 1;
1134 } else {
1135 dma_sync_single_for_device(musb->controller,
1136 request->request.dma,
1137 request->request.length,
1138 request->tx
1139 ? DMA_TO_DEVICE
1140 : DMA_FROM_DEVICE);
1141 request->mapped = 0;
1142 }
1143 } else if (!req->buf) {
1144 return -ENODATA;
1145 } else
1146 request->mapped = 0;
1147
1148 spin_lock_irqsave(&musb->lock, lockflags);
1149
1150 /* don't queue if the ep is down */
1151 if (!musb_ep->desc) {
1152 DBG(4, "req %p queued to %s while ep %s\n",
1153 req, ep->name, "disabled");
1154 status = -ESHUTDOWN;
1155 goto cleanup;
1156 }
1157
1158 /* add request to the list */
1159 list_add_tail(&(request->request.list), &(musb_ep->req_list));
1160
1161 /* it this is the head of the queue, start i/o ... */
1162 if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
1163 musb_ep_restart(musb, request);
1164
1165cleanup:
1166 spin_unlock_irqrestore(&musb->lock, lockflags);
1167 return status;
1168}
1169
1170static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1171{
1172 struct musb_ep *musb_ep = to_musb_ep(ep);
1173 struct usb_request *r;
1174 unsigned long flags;
1175 int status = 0;
1176 struct musb *musb = musb_ep->musb;
1177
1178 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1179 return -EINVAL;
1180
1181 spin_lock_irqsave(&musb->lock, flags);
1182
1183 list_for_each_entry(r, &musb_ep->req_list, list) {
1184 if (r == request)
1185 break;
1186 }
1187 if (r != request) {
1188 DBG(3, "request %p not queued to %s\n", request, ep->name);
1189 status = -EINVAL;
1190 goto done;
1191 }
1192
1193 /* if the hardware doesn't have the request, easy ... */
1194 if (musb_ep->req_list.next != &request->list || musb_ep->busy)
1195 musb_g_giveback(musb_ep, request, -ECONNRESET);
1196
1197 /* ... else abort the dma transfer ... */
1198 else if (is_dma_capable() && musb_ep->dma) {
1199 struct dma_controller *c = musb->dma_controller;
1200
1201 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1202 if (c->channel_abort)
1203 status = c->channel_abort(musb_ep->dma);
1204 else
1205 status = -EBUSY;
1206 if (status == 0)
1207 musb_g_giveback(musb_ep, request, -ECONNRESET);
1208 } else {
1209 /* NOTE: by sticking to easily tested hardware/driver states,
1210 * we leave counting of in-flight packets imprecise.
1211 */
1212 musb_g_giveback(musb_ep, request, -ECONNRESET);
1213 }
1214
1215done:
1216 spin_unlock_irqrestore(&musb->lock, flags);
1217 return status;
1218}
1219
1220/*
1221 * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1222 * data but will queue requests.
1223 *
1224 * exported to ep0 code
1225 */
1226int musb_gadget_set_halt(struct usb_ep *ep, int value)
1227{
1228 struct musb_ep *musb_ep = to_musb_ep(ep);
1229 u8 epnum = musb_ep->current_epnum;
1230 struct musb *musb = musb_ep->musb;
1231 void __iomem *epio = musb->endpoints[epnum].regs;
1232 void __iomem *mbase;
1233 unsigned long flags;
1234 u16 csr;
1235 struct musb_request *request = NULL;
1236 int status = 0;
1237
1238 if (!ep)
1239 return -EINVAL;
1240 mbase = musb->mregs;
1241
1242 spin_lock_irqsave(&musb->lock, flags);
1243
1244 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1245 status = -EINVAL;
1246 goto done;
1247 }
1248
1249 musb_ep_select(mbase, epnum);
1250
1251 /* cannot portably stall with non-empty FIFO */
1252 request = to_musb_request(next_request(musb_ep));
1253 if (value && musb_ep->is_in) {
1254 csr = musb_readw(epio, MUSB_TXCSR);
1255 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1256 DBG(3, "%s fifo busy, cannot halt\n", ep->name);
1257 spin_unlock_irqrestore(&musb->lock, flags);
1258 return -EAGAIN;
1259 }
1260
1261 }
1262
1263 /* set/clear the stall and toggle bits */
1264 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1265 if (musb_ep->is_in) {
1266 csr = musb_readw(epio, MUSB_TXCSR);
1267 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
1268 csr |= MUSB_TXCSR_FLUSHFIFO;
1269 csr |= MUSB_TXCSR_P_WZC_BITS
1270 | MUSB_TXCSR_CLRDATATOG;
1271 if (value)
1272 csr |= MUSB_TXCSR_P_SENDSTALL;
1273 else
1274 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1275 | MUSB_TXCSR_P_SENTSTALL);
1276 csr &= ~MUSB_TXCSR_TXPKTRDY;
1277 musb_writew(epio, MUSB_TXCSR, csr);
1278 } else {
1279 csr = musb_readw(epio, MUSB_RXCSR);
1280 csr |= MUSB_RXCSR_P_WZC_BITS
1281 | MUSB_RXCSR_FLUSHFIFO
1282 | MUSB_RXCSR_CLRDATATOG;
1283 if (value)
1284 csr |= MUSB_RXCSR_P_SENDSTALL;
1285 else
1286 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1287 | MUSB_RXCSR_P_SENTSTALL);
1288 musb_writew(epio, MUSB_RXCSR, csr);
1289 }
1290
1291done:
1292
1293 /* maybe start the first request in the queue */
1294 if (!musb_ep->busy && !value && request) {
1295 DBG(3, "restarting the request\n");
1296 musb_ep_restart(musb, request);
1297 }
1298
1299 spin_unlock_irqrestore(&musb->lock, flags);
1300 return status;
1301}
1302
1303static int musb_gadget_fifo_status(struct usb_ep *ep)
1304{
1305 struct musb_ep *musb_ep = to_musb_ep(ep);
1306 void __iomem *epio = musb_ep->hw_ep->regs;
1307 int retval = -EINVAL;
1308
1309 if (musb_ep->desc && !musb_ep->is_in) {
1310 struct musb *musb = musb_ep->musb;
1311 int epnum = musb_ep->current_epnum;
1312 void __iomem *mbase = musb->mregs;
1313 unsigned long flags;
1314
1315 spin_lock_irqsave(&musb->lock, flags);
1316
1317 musb_ep_select(mbase, epnum);
1318 /* FIXME return zero unless RXPKTRDY is set */
1319 retval = musb_readw(epio, MUSB_RXCOUNT);
1320
1321 spin_unlock_irqrestore(&musb->lock, flags);
1322 }
1323 return retval;
1324}
1325
/*
 * usb_ep_ops.fifo_flush: discard any data sitting in the endpoint FIFO.
 *
 * The endpoint's TX interrupt is masked across the flush so the flush
 * write cannot race with the ISR, then restored.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* double write covers double-buffered FIFOs */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
1364
/* endpoint operations exposed to gadget drivers (non-ep0 endpoints) */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
1376
1377/* ----------------------------------------------------------------------- */
1378
1379static int musb_gadget_get_frame(struct usb_gadget *gadget)
1380{
1381 struct musb *musb = gadget_to_musb(gadget);
1382
1383 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1384}
1385
/*
 * usb_gadget_ops.wakeup: signal remote wakeup (or start SRP from B_IDLE).
 *
 * In B_PERIPHERAL the RESUME bit is pulsed for ~2ms; in B_IDLE a session
 * is requested and the SESSION bit is polled until SRP completes.
 * Returns 0 on success, -EINVAL if wakeup is not permitted in the
 * current OTG state.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv.state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* busy-wait for SESSION to latch ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then for the SRP pulse to finish */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* Block idling for at least 1s */
		/* NOTE(review): msecs_to_jiffies(1 * HZ) treats HZ as a
		 * millisecond count, so the actual delay depends on the
		 * HZ config -- confirm whether "jiffies + HZ" or
		 * msecs_to_jiffies(1000) was intended.
		 */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
		goto done;
	}

	status = 0;

	/* pulse RESUME on the bus for ~2ms */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	DBG(2, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
1454
1455static int
1456musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1457{
1458 struct musb *musb = gadget_to_musb(gadget);
1459
1460 musb->is_self_powered = !!is_selfpowered;
1461 return 0;
1462}
1463
/*
 * Drive (or release) the D+ pullup via the SOFTCONN bit.
 * Caller holds musb->lock.
 *
 * NOTE(review): the DBG line dereferences musb->gadget_driver without a
 * NULL check -- confirm every caller guarantees a bound driver.
 */
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	DBG(3, "gadget %s D+ pullup %s\n",
		musb->gadget_driver->function, is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}
1480
#if 0
/* Disabled usb_gadget_ops.vbus_session stub -- kept for reference only. */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	DBG(2, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif
1494
1495static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1496{
1497 struct musb *musb = gadget_to_musb(gadget);
1498
1499 if (!musb->xceiv.set_power)
1500 return -EOPNOTSUPP;
1501 return otg_set_power(&musb->xceiv, mA);
1502}
1503
1504static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1505{
1506 struct musb *musb = gadget_to_musb(gadget);
1507 unsigned long flags;
1508
1509 is_on = !!is_on;
1510
1511 /* NOTE: this assumes we are sensing vbus; we'd rather
1512 * not pullup unless the B-session is active.
1513 */
1514 spin_lock_irqsave(&musb->lock, flags);
1515 if (is_on != musb->softconnect) {
1516 musb->softconnect = is_on;
1517 musb_pullup(musb, is_on);
1518 }
1519 spin_unlock_irqrestore(&musb->lock, flags);
1520 return 0;
1521}
1522
/* controller-level operations exposed through struct usb_gadget */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session		= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
};
1531
1532/* ----------------------------------------------------------------------- */
1533
1534/* Registration */
1535
1536/* Only this registration code "knows" the rule (from USB standards)
1537 * about there being only one external upstream port. It assumes
1538 * all peripheral ports are external...
1539 */
1540static struct musb *the_gadget;
1541
/* device_release callback for musb->g.dev; currently only logs. */
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
1547
1548
/*
 * Initialize one peripheral-side endpoint struct: back-pointers, name
 * ("ep0", "ep1in", "ep2out", ... -- shared-fifo endpoints get no
 * direction suffix), maxpacket, ops, and linkage into the gadget's
 * endpoint list (ep0 is kept off the list and wired to g.ep0 instead).
 */
static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		/* ep0 is special: fixed 64-byte maxpacket, its own ops */
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}
1581
1582/*
1583 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1584 * to the rest of the driver state.
1585 */
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __init musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			/* NOTE(review): shared-fifo endpoints register
			 * ep_in with is_in = 0 -- confirm intended; the
			 * direction is re-decided in musb_gadget_enable().
			 */
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}
1615
1616/* called once during driver setup to initialize and link into
1617 * the driver model; memory is zeroed.
1618 */
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 *
 * Registers this controller as "the" peripheral instance, fills in the
 * usb_gadget fields, builds the endpoint list, and registers g.dev.
 * Returns 0, -EBUSY if another instance already registered, or the
 * device_register() error.
 */
int __init musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race:  if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */
	if (the_gadget)
		return -EBUSY;
	the_gadget = musb;

	musb->g.ops = &musb_gadget_operations;
	musb->g.is_dualspeed = 1;
	musb->g.speed = USB_SPEED_UNKNOWN;

	/* this "gadget" abstracts/virtualizes the controller */
	strcpy(musb->g.dev.bus_id, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
	musb->g.name = musb_driver_name;

	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	/* on failure, allow a later setup attempt to claim the slot */
	status = device_register(&musb->g.dev);
	if (status != 0)
		the_gadget = NULL;
	return status;
}
1655
1656void musb_gadget_cleanup(struct musb *musb)
1657{
1658 if (musb != the_gadget)
1659 return;
1660
1661 device_unregister(&musb->g.dev);
1662 the_gadget = NULL;
1663}
1664
1665/*
1666 * Register the gadget driver. Used by gadget drivers when
1667 * registering themselves with the controller.
1668 *
1669 * -EINVAL something went wrong (not driver)
1670 * -EBUSY another gadget is already using the controller
1671 * -ENOMEM no memeory to perform the operation
1672 *
1673 * @param driver the gadget driver
1674 * @return <0 if error, 0 if everything is fine
1675 */
1676int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1677{
1678 int retval;
1679 unsigned long flags;
1680 struct musb *musb = the_gadget;
1681
1682 if (!driver
1683 || driver->speed != USB_SPEED_HIGH
1684 || !driver->bind
1685 || !driver->setup)
1686 return -EINVAL;
1687
1688 /* driver must be initialized to support peripheral mode */
1689 if (!musb || !(musb->board_mode == MUSB_OTG
1690 || musb->board_mode != MUSB_OTG)) {
1691 DBG(1, "%s, no dev??\n", __func__);
1692 return -ENODEV;
1693 }
1694
1695 DBG(3, "registering driver %s\n", driver->function);
1696 spin_lock_irqsave(&musb->lock, flags);
1697
1698 if (musb->gadget_driver) {
1699 DBG(1, "%s is already bound to %s\n",
1700 musb_driver_name,
1701 musb->gadget_driver->driver.name);
1702 retval = -EBUSY;
1703 } else {
1704 musb->gadget_driver = driver;
1705 musb->g.dev.driver = &driver->driver;
1706 driver->driver.bus = NULL;
1707 musb->softconnect = 1;
1708 retval = 0;
1709 }
1710
1711 spin_unlock_irqrestore(&musb->lock, flags);
1712
1713 if (retval == 0) {
1714 retval = driver->bind(&musb->g);
1715 if (retval != 0) {
1716 DBG(3, "bind to driver %s failed --> %d\n",
1717 driver->driver.name, retval);
1718 musb->gadget_driver = NULL;
1719 musb->g.dev.driver = NULL;
1720 }
1721
1722 spin_lock_irqsave(&musb->lock, flags);
1723
1724 /* REVISIT always use otg_set_peripheral(), handling
1725 * issues including the root hub one below ...
1726 */
1727 musb->xceiv.gadget = &musb->g;
1728 musb->xceiv.state = OTG_STATE_B_IDLE;
1729 musb->is_active = 1;
1730
1731 /* FIXME this ignores the softconnect flag. Drivers are
1732 * allowed hold the peripheral inactive until for example
1733 * userspace hooks up printer hardware or DSP codecs, so
1734 * hosts only see fully functional devices.
1735 */
1736
1737 if (!is_otg_enabled(musb))
1738 musb_start(musb);
1739
1740 spin_unlock_irqrestore(&musb->lock, flags);
1741
1742 if (is_otg_enabled(musb)) {
1743 DBG(3, "OTG startup...\n");
1744
1745 /* REVISIT: funcall to other code, which also
1746 * handles power budgeting ... this way also
1747 * ensures HdrcStart is indirectly called.
1748 */
1749 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1750 if (retval < 0) {
1751 DBG(1, "add_hcd failed, %d\n", retval);
1752 spin_lock_irqsave(&musb->lock, flags);
1753 musb->xceiv.gadget = NULL;
1754 musb->xceiv.state = OTG_STATE_UNDEFINED;
1755 musb->gadget_driver = NULL;
1756 musb->g.dev.driver = NULL;
1757 spin_unlock_irqrestore(&musb->lock, flags);
1758 }
1759 }
1760 }
1761
1762 return retval;
1763}
1764EXPORT_SYMBOL(usb_gadget_register_driver);
1765
/*
 * Quiesce the controller and, when a driver is still bound and the bus
 * was connected, report the disconnect to it.  Drops the D+ pullup,
 * stops the core, and nukes every endpoint queue with -ESHUTDOWN.
 * Caller holds musb->lock; it is released around driver->disconnect().
 */
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}

		/* drop the lock for the (possibly sleeping) callback */
		spin_unlock(&musb->lock);
		driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}
}
1807
1808/*
1809 * Unregister the gadget driver. Used by gadget drivers when
1810 * unregistering themselves from the controller.
1811 *
1812 * @param driver the gadget driver to unregister
1813 */
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 *
 * Returns 0 on success, -EINVAL on bad arguments or if a different
 * driver is bound.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	int		retval = 0;
	struct musb	*musb = the_gadget;

	if (!driver || !driver->unbind || !musb)
		return -EINVAL;

	/* REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

#ifdef	CONFIG_USB_MUSB_OTG
	/* NOTE(review): HNP is stopped even when `driver` turns out not
	 * to be the bound driver below -- confirm that is intended.
	 */
	musb_hnp_stop(musb);
#endif

	if (musb->gadget_driver == driver) {

		(void) musb_gadget_vbus_draw(&musb->g, 0);

		musb->xceiv.state = OTG_STATE_UNDEFINED;
		stop_activity(musb, driver);

		DBG(3, "unregistering driver %s\n", driver->function);
		/* unbind may sleep; drop the lock around it */
		spin_unlock_irqrestore(&musb->lock, flags);
		driver->unbind(&musb->g);
		spin_lock_irqsave(&musb->lock, flags);

		musb->gadget_driver = NULL;
		musb->g.dev.driver = NULL;

		musb->is_active = 0;
		musb_platform_try_idle(musb, 0);
	} else
		retval = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb) && retval == 0) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	return retval;
}
1864EXPORT_SYMBOL(usb_gadget_unregister_driver);
1865
1866
1867/* ----------------------------------------------------------------------- */
1868
1869/* lifecycle operations called through plat_uds.c */
1870
/*
 * Bus resume seen while in peripheral mode; clear the suspend flag and
 * forward to the gadget driver when one is bound.  Caller holds
 * musb->lock; it is dropped around the driver callback.
 */
void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv.state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb));
	}
}
1891
1892/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	/* Bus suspend seen in peripheral mode.  In B_IDLE with VBUS valid
	 * this promotes the OTG state to B_PERIPHERAL; in B_PERIPHERAL it
	 * marks the device suspended and notifies the gadget driver (lock
	 * dropped around the callback).  Caller holds musb->lock.
	 */
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	DBG(3, "devctl %02x\n", devctl);

	switch (musb->xceiv.state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb));
	}
}
1921
1922/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	/* delegate to the gadget-ops wakeup path (takes musb->lock itself) */
	musb_gadget_wakeup(&musb->g);
}
1927
1928/* called when VBUS drops below session threshold, and in other cases */
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	DBG(3, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	/* lock is dropped around the (possibly sleeping) driver callback */
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	/* NOTE: `default` precedes the case labels on purpose -- the OTG
	 * cases inside the #ifdef fall under it when OTG is disabled.
	 */
	switch (musb->xceiv.state) {
	default:
#ifdef	CONFIG_USB_MUSB_OTG
		DBG(2, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb));
		musb->xceiv.state = OTG_STATE_A_IDLE;
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
#endif
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv.state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
1972
/*
 * Bus reset seen in peripheral mode: report any pending disconnect,
 * latch the negotiated speed from POWER.HSMODE, and re-enter
 * USB_STATE_DEFAULT with address 0 and ep0 in SETUP stage.
 * Caller holds musb->lock (released/reacquired inside
 * musb_g_disconnect when a disconnect is reported).
 */
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	DBG(3, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	/* reset wipes remote-wakeup permission and HNP flags */
	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 000000000000..59502da9f739
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
1/*
2 * MUSB OTG driver peripheral defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_GADGET_H
36#define __MUSB_GADGET_H
37
/*
 * struct musb_request - peripheral-side wrapper around a gadget request,
 * recording which endpoint/direction it was queued on.
 */
struct musb_request {
	struct usb_request	request;	/* the gadget-core request */
	struct musb_ep		*ep;		/* endpoint it was queued to */
	struct musb		*musb;		/* owning controller */
	u8 tx;			/* endpoint direction: nonzero = IN/TX */
	u8 epnum;		/* endpoint number */
	u8 mapped;		/* presumably: buffer is DMA-mapped; set
					 * outside this header — TODO confirm */
};
46
47static inline struct musb_request *to_musb_request(struct usb_request *req)
48{
49 return req ? container_of(req, struct musb_request, request) : NULL;
50}
51
52extern struct usb_request *
53musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
54extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
55
56
/*
 * struct musb_ep - peripheral side view of endpoint rx or tx side
 */
struct musb_ep {
	/* stuff towards the head is basically write-once. */
	struct usb_ep		end_point;	/* gadget-core view */
	char			name[12];	/* human-readable endpoint name */
	struct musb_hw_ep	*hw_ep;		/* backing hardware endpoint */
	struct musb		*musb;		/* owning controller */
	u8			current_epnum;	/* endpoint number */

	/* ... when enabled/disabled ... */
	u8			type;		/* USB_ENDPOINT_XFER_* */
	u8			is_in;		/* nonzero for IN/TX direction */
	u16			packet_sz;	/* max packet size */
	const struct usb_endpoint_descriptor	*desc;	/* NULL while disabled */
	struct dma_channel	*dma;		/* DMA channel, if any */

	/* later things are modified based on usage */
	struct list_head	req_list;	/* queued musb_requests */

	/* true if lock must be dropped but req_list may not be advanced */
	u8			busy;
};
81
82static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
83{
84 return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
85}
86
87static inline struct usb_request *next_request(struct musb_ep *ep)
88{
89 struct list_head *queue = &ep->req_list;
90
91 if (list_empty(queue))
92 return NULL;
93 return container_of(queue->next, struct usb_request, list);
94}
95
96extern void musb_g_tx(struct musb *musb, u8 epnum);
97extern void musb_g_rx(struct musb *musb, u8 epnum);
98
99extern const struct usb_ep_ops musb_g_ep0_ops;
100
101extern int musb_gadget_setup(struct musb *);
102extern void musb_gadget_cleanup(struct musb *);
103
104extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
105
106extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
107
108#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 000000000000..48d7d3ccb243
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,981 @@
1/*
2 * MUSB OTG peripheral driver ep0 handling
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/timer.h>
38#include <linux/spinlock.h>
39#include <linux/init.h>
40#include <linux/device.h>
41#include <linux/interrupt.h>
42
43#include "musb_core.h"
44
45/* ep0 is always musb->endpoints[0].ep_in */
46#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
47
48/*
49 * locking note: we use only the controller lock, for simpler correctness.
50 * It's always held with IRQs blocked.
51 *
52 * It protects the ep0 request queue as well as ep0_state, not just the
53 * controller and indexed registers. And that lock stays held unless it
54 * needs to be dropped to allow reentering this driver ... like upcalls to
55 * the gadget driver, or adjusting endpoint halt status.
56 */
57
58static char *decode_ep0stage(u8 stage)
59{
60 switch (stage) {
61 case MUSB_EP0_STAGE_SETUP: return "idle";
62 case MUSB_EP0_STAGE_TX: return "in";
63 case MUSB_EP0_STAGE_RX: return "out";
64 case MUSB_EP0_STAGE_ACKWAIT: return "wait";
65 case MUSB_EP0_STAGE_STATUSIN: return "in/status";
66 case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
67 default: return "?";
68 }
69}
70
71/* handle a standard GET_STATUS request
72 * Context: caller holds controller lock
73 */
74static int service_tx_status_request(
75 struct musb *musb,
76 const struct usb_ctrlrequest *ctrlrequest)
77{
78 void __iomem *mbase = musb->mregs;
79 int handled = 1;
80 u8 result[2], epnum = 0;
81 const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
82
83 result[1] = 0;
84
85 switch (recip) {
86 case USB_RECIP_DEVICE:
87 result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
88 result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
89#ifdef CONFIG_USB_MUSB_OTG
90 if (musb->g.is_otg) {
91 result[0] |= musb->g.b_hnp_enable
92 << USB_DEVICE_B_HNP_ENABLE;
93 result[0] |= musb->g.a_alt_hnp_support
94 << USB_DEVICE_A_ALT_HNP_SUPPORT;
95 result[0] |= musb->g.a_hnp_support
96 << USB_DEVICE_A_HNP_SUPPORT;
97 }
98#endif
99 break;
100
101 case USB_RECIP_INTERFACE:
102 result[0] = 0;
103 break;
104
105 case USB_RECIP_ENDPOINT: {
106 int is_in;
107 struct musb_ep *ep;
108 u16 tmp;
109 void __iomem *regs;
110
111 epnum = (u8) ctrlrequest->wIndex;
112 if (!epnum) {
113 result[0] = 0;
114 break;
115 }
116
117 is_in = epnum & USB_DIR_IN;
118 if (is_in) {
119 epnum &= 0x0f;
120 ep = &musb->endpoints[epnum].ep_in;
121 } else {
122 ep = &musb->endpoints[epnum].ep_out;
123 }
124 regs = musb->endpoints[epnum].regs;
125
126 if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
127 handled = -EINVAL;
128 break;
129 }
130
131 musb_ep_select(mbase, epnum);
132 if (is_in)
133 tmp = musb_readw(regs, MUSB_TXCSR)
134 & MUSB_TXCSR_P_SENDSTALL;
135 else
136 tmp = musb_readw(regs, MUSB_RXCSR)
137 & MUSB_RXCSR_P_SENDSTALL;
138 musb_ep_select(mbase, 0);
139
140 result[0] = tmp ? 1 : 0;
141 } break;
142
143 default:
144 /* class, vendor, etc ... delegate */
145 handled = 0;
146 break;
147 }
148
149 /* fill up the fifo; caller updates csr0 */
150 if (handled > 0) {
151 u16 len = le16_to_cpu(ctrlrequest->wLength);
152
153 if (len > 2)
154 len = 2;
155 musb_write_fifo(&musb->endpoints[0], len, result);
156 }
157
158 return handled;
159}
160
161/*
162 * handle a control-IN request, the end0 buffer contains the current request
163 * that is supposed to be a standard control request. Assumes the fifo to
164 * be at least 2 bytes long.
165 *
166 * @return 0 if the request was NOT HANDLED,
167 * < 0 when error
168 * > 0 when the request is processed
169 *
170 * Context: caller holds controller lock
171 */
172static int
173service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
174{
175 int handled = 0; /* not handled */
176
177 if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
178 == USB_TYPE_STANDARD) {
179 switch (ctrlrequest->bRequest) {
180 case USB_REQ_GET_STATUS:
181 handled = service_tx_status_request(musb,
182 ctrlrequest);
183 break;
184
185 /* case USB_REQ_SYNC_FRAME: */
186
187 default:
188 break;
189 }
190 }
191 return handled;
192}
193
/*
 * Complete the given ep0 request (status 0) and return the ep0 state
 * machine to waiting for the next SETUP packet.
 *
 * Context: caller holds controller lock
 */
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
	/* ep0 requests always live on the ep_in side's queue */
	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
}
202
203/*
204 * Tries to start B-device HNP negotiation if enabled via sysfs
205 */
206static inline void musb_try_b_hnp_enable(struct musb *musb)
207{
208 void __iomem *mbase = musb->mregs;
209 u8 devctl;
210
211 DBG(1, "HNP: Setting HR\n");
212 devctl = musb_readb(mbase, MUSB_DEVCTL);
213 musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
214}
215
/*
 * Handle all control requests with no DATA stage, including standard
 * requests such as:
 * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
 *	always delegated to the gadget driver
 * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
 *	always handled here, except for class/vendor/... features
 *
 * Returns >0 when handled here, 0 to delegate to the gadget driver,
 * <0 to stall.
 *
 * Context: caller holds controller lock; may be dropped/re-acquired
 * around musb_gadget_set_halt() (see __releases/__acquires).
 */
static int
service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT:{
				/* CLEAR_FEATURE(ENDPOINT_HALT): un-stall */
				const u8 num = ctrlrequest->wIndex & 0x0f;
				struct musb_ep *musb_ep;

				if (num == 0
						|| num >= MUSB_C_NUM_EPS
						|| ctrlrequest->wValue
							!= USB_ENDPOINT_HALT)
					break;

				if (ctrlrequest->wIndex & USB_DIR_IN)
					musb_ep = &musb->endpoints[num].ep_in;
				else
					musb_ep = &musb->endpoints[num].ep_out;
				if (!musb_ep->desc)
					break;

				/* REVISIT do it directly, no locking games */
				spin_unlock(&musb->lock);
				musb_gadget_set_halt(&musb_ep->end_point, 0);
				spin_lock(&musb->lock);

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					/* test modes require high speed and
					 * a zero low byte in wIndex
					 */
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

					/* test selector is wIndex high byte */
					switch (ctrlrequest->wIndex >> 8) {
					case 1:
						pr_debug("TEST_J\n");
						/* TEST_J */
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case 2:
						/* TEST_K */
						pr_debug("TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case 3:
						/* TEST_SE0_NAK */
						pr_debug("TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case 4:
						/* TEST_PACKET */
						pr_debug("TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq */
					if (handled > 0)
						musb->test_mode = true;
					break;
#ifdef CONFIG_USB_MUSB_OTG
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
#endif
/* NOTE: the stall label is deliberately fused to the default arm:
 * unknown feature selectors and all goto-stall paths end up here.
 */
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT:{
				/* SET_FEATURE(ENDPOINT_HALT): stall the
				 * endpoint directly via its CSR
				 */
				const u8 epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep *musb_ep;
				struct musb_hw_ep *ep;
				void __iomem *regs;
				int is_in;
				u16 csr;

				if (epnum == 0
						|| epnum >= MUSB_C_NUM_EPS
						|| ctrlrequest->wValue
							!= USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs,
							MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR,
							csr);
				} else {
					csr = musb_readw(regs,
							MUSB_RXCSR);
					/* NOTE(review): TXCSR write-zero-to-
					 * clear bits are OR'd into RXCSR here;
					 * looks like it should be
					 * MUSB_RXCSR_P_WZC_BITS — verify
					 * against the MUSB register spec.
					 */
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR,
							csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}
432
433/* we have an ep0out data packet
434 * Context: caller holds controller lock
435 */
436static void ep0_rxstate(struct musb *musb)
437{
438 void __iomem *regs = musb->control_ep->regs;
439 struct usb_request *req;
440 u16 tmp;
441
442 req = next_ep0_request(musb);
443
444 /* read packet and ack; or stall because of gadget driver bug:
445 * should have provided the rx buffer before setup() returned.
446 */
447 if (req) {
448 void *buf = req->buf + req->actual;
449 unsigned len = req->length - req->actual;
450
451 /* read the buffer */
452 tmp = musb_readb(regs, MUSB_COUNT0);
453 if (tmp > len) {
454 req->status = -EOVERFLOW;
455 tmp = len;
456 }
457 musb_read_fifo(&musb->endpoints[0], tmp, buf);
458 req->actual += tmp;
459 tmp = MUSB_CSR0_P_SVDRXPKTRDY;
460 if (tmp < 64 || req->actual == req->length) {
461 musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
462 tmp |= MUSB_CSR0_P_DATAEND;
463 } else
464 req = NULL;
465 } else
466 tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
467
468
469 /* Completion handler may choose to stall, e.g. because the
470 * message just received holds invalid data.
471 */
472 if (req) {
473 musb->ackpend = tmp;
474 musb_g_ep0_giveback(musb, req);
475 if (!musb->ackpend)
476 return;
477 musb->ackpend = 0;
478 }
479 musb_writew(regs, MUSB_CSR0, tmp);
480}
481
/*
 * transmitting to the host (IN), this code might be called from IRQ
 * and from kernel thread.
 *
 * Loads up to one ep0 FIFO's worth of the current request into the
 * FIFO, marks the data stage done on a short/final packet, and sets
 * TXPKTRDY to start transmission.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem *regs = musb->control_ep->regs;
	struct usb_request *request = next_ep0_request(musb);
	u16 csr = MUSB_CSR0_TXPKTRDY;
	u8 *fifo_src;
	u8 fifo_count;

	if (!request) {
		/* WARN_ON(1); */
		DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
		return;
	}

	/* load the data: at most one max-size ep0 packet per call */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/* update the flags: a short packet or a fully-sent buffer
	 * ends the data stage
	 */
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| request->actual == request->length) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		/* giveback may have queued a stall via ackpend */
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_writew(regs, MUSB_CSR0, csr);
}
533
/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Also completes any stale ep0 request, and primes the ep0 state
 * machine (and the ackpend CSR0 value) for the request's data or
 * status stage.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct usb_request *r;
	void __iomem *regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE: earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, r);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		/* ack the SETUP immediately, then busy-wait (briefly)
		 * for the controller to drop RXPKTRDY before loading
		 * the TX FIFO
		 */
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}
587
588static int
589forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
590__releases(musb->lock)
591__acquires(musb->lock)
592{
593 int retval;
594 if (!musb->gadget_driver)
595 return -EOPNOTSUPP;
596 spin_unlock(&musb->lock);
597 retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
598 spin_lock(&musb->lock);
599 return retval;
600}
601
/*
 * Handle peripheral ep0 interrupt: acknowledge stalls and SETUPEND,
 * then advance the ep0 state machine (data in/out, status phases,
 * and fresh SETUP packets, which may be serviced locally or forwarded
 * to the gadget driver).
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16 csr;
	u16 len;
	void __iomem *mbase = musb->mregs;
	void __iomem *regs = musb->endpoints[0].regs;
	irqreturn_t retval = IRQ_NONE;

	musb_ep_select(mbase, 0);	/* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);

	DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
			csr, len,
			musb_readb(mbase, MUSB_FADDR),
			decode_ep0stage(musb->ep0_state));

	/* I sent a stall.. need to acknowledge it now.. */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* re-read: hardware may have updated CSR0 */
		csr = musb_readw(regs, MUSB_CSR0);
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE: request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {

	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			DBG(1, "entering TESTMODE\n");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct usb_request *req;

			/* complete whatever request finished its status
			 * stage, then fall through to check for a
			 * back-to-back SETUP packet
			 */
			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, req);
		}
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_SETUP:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest setup;
			int handled = 0;

			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8 power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			/* musb_read_setup() just primed ep0_state from
			 * the SETUP packet's direction/length
			 */
			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/* status stage might be immediate */
				if (handled > 0) {
					musb->ackpend |= MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				}
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default:		/* MUSB_EP0_STAGE_RX */
				break;
			}

			DBG(3, "handled %d, csr %04x, ep0stage %s\n",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up: csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				/* the upcall may have touched the index
				 * register; point it back at ep0
				 */
				musb_ep_select(mbase, 0);
stall:
				DBG(3, "stall (%d)\n", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_SETUP;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		break;
	}

	return retval;
}
810
811
static int musb_g_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	/* ep0 is enabled for the controller's whole lifetime; the
	 * gadget core must never ask to enable it explicitly.
	 */
	return -EINVAL;
}
818
static int musb_g_ep0_disable(struct usb_ep *ep)
{
	/* ep0 stays enabled for the controller's whole lifetime */
	return -EINVAL;
}
824
/*
 * Queue a request on ep0.  Only one request may be pending at a time,
 * and it must be queued while ep0 is in a data (or zero-length ack)
 * stage; depending on the stage this starts TX immediately, completes
 * a zero-length status handshake, or flushes any pending CSR0 ack.
 *
 * Returns 0 on success, -EINVAL for bad arguments/state, -EBUSY when
 * a request is already queued.
 */
static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep *ep;
	struct musb_request *req;
	struct musb *musb;
	int status;
	unsigned long lockflags;
	void __iomem *regs;

	if (!e || !r)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;

	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;

	spin_lock_irqsave(&musb->lock, lockflags);

	/* ep0 handles one request at a time */
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	/* requests are only legal during a data/ack stage */
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
		status = 0;
		break;
	default:
		DBG(1, "ep0 request queued in state %d\n",
				musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&(req->request.list), &(ep->req_list));

	DBG(3, "queue to %s (%s), length=%d\n",
			ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
			req->request.length);

	musb_ep_select(musb->mregs, 0);

	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);

	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		/* a zero-length stage must use a zero-length request */
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			musb_g_ep0_giveback(ep->musb, r);
		}

	/* else for sequence #2 (OUT), caller provides a buffer
	 * before the next packet arrives.  deferred responses
	 * (after SETUP is acked) are racey.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
906
static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* dequeueing ep0 requests is deliberately unsupported */
	return -EINVAL;
}
912
/*
 * Halt (stall) ep0.  Only setting the halt is supported (value must be
 * nonzero — ep0 stalls clear automatically), and only while ep0 is in
 * a data or status stage with no request queued.
 *
 * Returns 0 on success, -EINVAL for bad arguments/state, -EBUSY when
 * a request is pending.
 */
static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep *ep;
	struct musb *musb;
	void __iomem *base, *regs;
	unsigned long flags;
	int status;
	u16 csr;

	/* clearing an ep0 halt (value == 0) is rejected */
	if (!e || !value)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;

	spin_lock_irqsave(&musb->lock, flags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	musb_ep_select(base, 0);
	/* start from any CSR0 ack bits still pending */
	csr = musb->ackpend;

	switch (musb->ep0_state) {

	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
		csr = musb_readw(regs, MUSB_CSR0);
		/* FALLTHROUGH */

	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */

		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		/* after a stall, wait for the next SETUP */
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		musb->ackpend = 0;
		break;
	default:
		DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
		status = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
972
/*
 * ep0 operations exported to the gadget core.  ep0 cannot be enabled,
 * disabled, or have requests dequeued, so those handlers are stubs
 * returning -EINVAL.
 */
const struct usb_ep_ops musb_g_ep0_ops = {
	.enable		= musb_g_ep0_enable,
	.disable	= musb_g_ep0_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_g_ep0_queue,
	.dequeue	= musb_g_ep0_dequeue,
	.set_halt	= musb_g_ep0_halt,
};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000000..8b4be012669a
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
1/*
2 * MUSB OTG driver host support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/delay.h>
38#include <linux/sched.h>
39#include <linux/slab.h>
40#include <linux/errno.h>
41#include <linux/init.h>
42#include <linux/list.h>
43
44#include "musb_core.h"
45#include "musb_host.h"
46
47
48/* MUSB HOST status 22-mar-2006
49 *
50 * - There's still lots of partial code duplication for fault paths, so
51 * they aren't handled as consistently as they need to be.
52 *
53 * - PIO mostly behaved when last tested.
54 * + including ep0, with all usbtest cases 9, 10
55 * + usbtest 14 (ep0out) doesn't seem to run at all
56 * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
57 * configurations, but otherwise double buffering passes basic tests.
58 * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
59 *
60 * - DMA (CPPI) ... partially behaves, not currently recommended
61 * + about 1/15 the speed of typical EHCI implementations (PCI)
62 * + RX, all too often reqpkt seems to misbehave after tx
63 * + TX, no known issues (other than evident silicon issue)
64 *
65 * - DMA (Mentor/OMAP) ...has at least toggle update problems
66 *
67 * - Still no traffic scheduling code to make NAKing for bulk or control
68 * transfers unable to starve other requests; or to make efficient use
69 * of hardware with periodic transfers. (Note that network drivers
70 * commonly post bulk reads that stay pending for a long time; these
71 * would make very visible trouble.)
72 *
73 * - Not tested with HNP, but some SRP paths seem to behave.
74 *
75 * NOTE 24-August-2006:
76 *
77 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
78 * extra endpoint for periodic use enabling hub + keybd + mouse. That
79 * mostly works, except that with "usbnet" it's easy to trigger cases
80 * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
81 * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
82 * although ARP RX wins. (That test was done with a full speed link.)
83 */
84
85
86/*
87 * NOTE on endpoint usage:
88 *
89 * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
90 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
91 *
92 * (Yes, bulk _could_ use more of the endpoints than that, and would even
93 * benefit from it ... one remote device may easily be NAKing while others
94 * need to perform transfers in that same direction. The same thing could
95 * be done in software though, assuming dma cooperates.)
96 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
98 * So far that scheduling is both dumb and optimistic: the endpoint will be
99 * "claimed" until its software queue is no longer refilled. No multiplexing
100 * of transfers between endpoints, or anything clever.
101 */
102
103
/* Forward declaration: programs a host endpoint's registers (and DMA,
 * when available) for the given URB; defined below after its helpers.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int nOut,
			u8 *buf, u32 len);
107
108/*
109 * Clear TX fifo. Needed to avoid BABBLE errors.
110 */
111static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112{
113 void __iomem *epio = ep->regs;
114 u16 csr;
115 int retries = 1000;
116
117 csr = musb_readw(epio, MUSB_TXCSR);
118 while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
119 DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
120 csr |= MUSB_TXCSR_FLUSHFIFO;
121 musb_writew(epio, MUSB_TXCSR, csr);
122 csr = musb_readw(epio, MUSB_TXCSR);
123 if (retries-- < 1) {
124 ERR("Could not flush host TX fifo: csr: %04x\n", csr);
125 return;
126 }
127 mdelay(1);
128 }
129}
130
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 *
 * For ep0 (epnum == 0) the write marks the SETUP packet ready; for all
 * other endpoints it just sets TXPKTRDY on the already-loaded fifo.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		/* H_WZC_BITS: write-1-to-clear bits must stay set so the
		 * read-modify-write doesn't clear them accidentally
		 */
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		/* ep0: CSR0 register layout, always a SETUP packet */
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}

}
150
/* Enable DMA on an already-programmed TX endpoint (CPPI/OMAP DMA defer
 * setting DMAENAB until the channel is fully set up).
 */
static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
160
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 *
 * Picks the data source/length per transfer type, programs the hardware
 * endpoint, and for OUT transfers either kicks off the transmit now or
 * (for periodic transfers not yet due) arms the SOF interrupt to start
 * it later.
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void			*buf;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;	/* SETUP packets are always 8 bytes */
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		buf = urb->transfer_buffer;
		len = urb->transfer_buffer_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			cppi_host_txdma_start(hw_ep);
	}
}
264
/* caller owns controller lock, irqs are blocked */
/*
 * Hand a completed URB back to the USB core. The lock is dropped
 * around usb_hcd_giveback_urb() because the completion callback may
 * resubmit URBs and must not run under the controller lock; the
 * sparse annotations below document that release/reacquire.
 */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	/* pick a debug verbosity by outcome: success is chatty-only,
	 * common disconnect/unlink faults are mid, real errors loudest
	 */
	DBG(({ int level; switch (urb->status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
298
299/* for bulk/interrupt endpoints only */
300static inline void
301musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
302{
303 struct usb_device *udev = urb->dev;
304 u16 csr;
305 void __iomem *epio = ep->regs;
306 struct musb_qh *qh;
307
308 /* FIXME: the current Mentor DMA code seems to have
309 * problems getting toggle correct.
310 */
311
312 if (is_in || ep->is_shared_fifo)
313 qh = ep->in_qh;
314 else
315 qh = ep->out_qh;
316
317 if (!is_in) {
318 csr = musb_readw(epio, MUSB_TXCSR);
319 usb_settoggle(udev, qh->epnum, 1,
320 (csr & MUSB_TXCSR_H_DATATOGGLE)
321 ? 1 : 0);
322 } else {
323 csr = musb_readw(epio, MUSB_RXCSR);
324 usb_settoggle(udev, qh->epnum, 0,
325 (csr & MUSB_RXCSR_H_DATATOGGLE)
326 ? 1 : 0);
327 }
328}
329
/* caller owns controller lock, irqs are blocked */
/*
 * Complete @urb on @qh and, if that emptied the qh's URB list, tear the
 * qh down and deschedule it.
 *
 * Returns the qh to continue servicing: the same qh if it still has
 * URBs, the next qh on a control/bulk ring, or NULL.
 */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	int			is_in;
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			ready = qh->is_ready;

	/* shared-fifo endpoints track state on the "in" side */
	if (ep->is_shared_fifo)
		is_in = 1;
	else
		is_in = usb_pipein(urb->pipe);

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* per-frame errors surface as an overall -EXDEV */
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);

	/* mark not-ready across the callback so a resubmission from the
	 * completion handler doesn't get started underneath us
	 */
	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			musb->periodic[ep->epnum] = NULL;
			kfree(qh);
			qh = NULL;
			break;

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			head = qh->ring.prev;
			list_del(&qh->ring);
			kfree(qh);
			qh = first_qh(head);
			break;
		}
	}
	return qh;
}
407
408/*
409 * Advance this hardware endpoint's queue, completing the specified urb and
410 * advancing to either the next urb queued to that qh, or else invalidating
411 * that qh and advancing to the next qh scheduled after the current one.
412 *
413 * Context: caller owns controller lock, irqs are blocked
414 */
415static void
416musb_advance_schedule(struct musb *musb, struct urb *urb,
417 struct musb_hw_ep *hw_ep, int is_in)
418{
419 struct musb_qh *qh;
420
421 if (is_in || hw_ep->is_shared_fifo)
422 qh = hw_ep->in_qh;
423 else
424 qh = hw_ep->out_qh;
425
426 if (urb->status == -EINPROGRESS)
427 qh = musb_giveback(qh, urb, 0);
428 else
429 qh = musb_giveback(qh, urb, urb->status);
430
431 if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
432 DBG(4, "... next ep%d %cX urb %p\n",
433 hw_ep->epnum, is_in ? 'R' : 'T',
434 next_urb(qh));
435 musb_start_urb(musb, is_in, qh);
436 }
437}
438
/*
 * Flush the RX fifo of @hw_ep, disabling further packet requests and
 * DMA-assist bits; @csr supplies any extra bits to write (e.g.
 * CLRDATATOG). Returns the RXCSR value read back after the flush.
 */
static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
457
/*
 * PIO RX for a packet (or part of it).
 *
 * Unloads one packet's worth of data from the endpoint fifo into the
 * URB's buffer (the current iso frame descriptor for isochronous
 * transfers, the running offset otherwise), handling overflow by
 * flushing the fifo. Returns true when the URB is finished and should
 * be given back; otherwise re-arms REQPKT for the next packet.
 *
 * Context: caller holds the controller lock with the endpoint selected.
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		/* bus-level error on this frame: record it, keep going */
		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* more data than this frame's slot: truncate and
			 * flush the excess out of the fifo
			 */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done: buffer full, short packet, or a
		 * status already latched by an earlier error/overflow
		 */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
553
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
/*
 * Reinitialize the RX side of @ep for the device/endpoint described by
 * @qh: scrub stale fifo/CSR state, then program addressing, protocol,
 * interval and maxpacket registers. Clears ep->rx_reinit on exit.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			musb_writew(ep->regs, MUSB_TXCSR,
					MUSB_TXCSR_FRCDATATOG);
		}
		/* clear mode (and everything else) to enable Rx */
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
			qh->addr_reg);
		musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
			qh->h_addr_reg);
		musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
			qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}
612
613
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 *
 * @epnum:  hardware endpoint index (0 == control endpoint)
 * @is_out: nonzero to program the TX/OUT side (also used for the ep0
 *          SETUP packet), zero for RX/IN
 * @buf/@len: PIO source/destination and transfer length
 *
 * Allocates a DMA channel when one is available, then programs the
 * side-specific CSRs, addressing registers and (for PIO TX) preloads
 * the fifo. RX additionally arms REQPKT to start the IN transaction.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			/* restore this device endpoint's saved toggle */
			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
			musb_writew(epio, MUSB_CSR0,
				csr | MUSB_CSR0_FLUSHFIFO);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
				qh->addr_reg);
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
				qh->h_addr_reg);
			musb_writeb(mbase,
				MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
				qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			/* mode 1 only pays off for multi-packet transfers */
			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;


			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |=  (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				/* fall back to PIO below */
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			/* ep0 transmit is kicked later by musb_h_tx_start */
			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
931
932
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 *
 * @len: for the IN stage, number of bytes available in the fifo.
 *
 * Advances musb->ep0_stage through START -> IN/OUT, moving data between
 * the fifo and urb->transfer_buffer one packet at a time.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
					- urb->actual_length)));
		/* device sent more than the buffer can hold */
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min(qh->maxpacket, ((u16)
				(urb->transfer_buffer_length
				- urb->actual_length)));

		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d bytes to %p\n",
					fifo_count, fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
1005
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from the LinuxIsr() interrupt service routine.
 *
 * called with controller irqlocked
 *
 * Checks CSR0 for stall/error/NAK-timeout, aborts the transfer on
 * fatal status, otherwise continues the data stage via
 * musb_h_ep0_continue() or issues the status stage; completed URBs are
 * given back through musb_advance_schedule().
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			/* IN pending: drop REQPKT, then the timeout flag */
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			/* OUT pending: flush fifo (twice for double
			 * buffering), then clear the timeout flag
			 */
			csr |= MUSB_CSR0_FLUSHFIFO;
			musb_writew(epio, MUSB_CSR0, csr);
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
		musb_writew(epio, MUSB_CSR0, 0);

		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1137
1138
1139#ifdef CONFIG_USB_INVENTRA_DMA
1140
1141/* Host side TX (OUT) using Mentor DMA works as follows:
1142 submit_urb ->
1143 - if queue was empty, Program Endpoint
1144 - ... which starts DMA to fifo in mode 1 or 0
1145
1146 DMA Isr (transfer complete) -> TxAvail()
1147 - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
1148 only in musb_cleanup_urb)
1149 - TxPktRdy has to be set in mode 0 or for
1150 short packets in mode 1.
1151*/
1152
1153#endif
1154
1155/* Service a Tx-Available or dma completion irq for the endpoint */
1156 void musb_host_tx(struct musb *musb, u8 epnum)
1157 {
	/*
	 * Service a TX-ready or DMA-completion interrupt for OUT endpoint
	 * 'epnum': record any stall/error/NAK-timeout into 'status', abort a
	 * faulted transfer, and otherwise either complete the current urb
	 * (musb_advance_schedule) or start the next PIO packet (TXPKTRDY).
	 */
1158 int pipe;
1159 bool done = false;
1160 u16 tx_csr;
1161 size_t wLength = 0;
1162 u8 *buf = NULL;
1163 struct urb *urb;
1164 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1165 void __iomem *epio = hw_ep->regs;
1166 struct musb_qh *qh = hw_ep->out_qh;
1167 u32 status = 0;
1168 void __iomem *mbase = musb->mregs;
1169 struct dma_channel *dma;
1170
1171 urb = next_urb(qh);
1172
1173 musb_ep_select(mbase, epnum);
1174 tx_csr = musb_readw(epio, MUSB_TXCSR);
1175
1176 /* with CPPI, DMA sometimes triggers "extra" irqs */
1177 if (!urb) {
1178 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1179 goto finish;
1180 }
1181
1182 pipe = urb->pipe;
1183 dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1184 DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1185 dma ? ", dma" : "");
1186
1187 /* check for errors */
1188 if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1189 /* dma was disabled, fifo flushed */
1190 DBG(3, "TX end %d stall\n", epnum);
1191
1192 /* stall; record URB status */
1193 status = -EPIPE;
1194
1195 } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1196 /* (NON-ISO) dma was disabled, fifo flushed */
1197 DBG(3, "TX 3strikes on ep=%d\n", epnum);
1198
1199 status = -ETIMEDOUT;
1200
1201 } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1202 DBG(6, "TX end=%d device not responding\n", epnum);
1203
1204 /* NOTE: this code path would be a good place to PAUSE a
1205 * transfer, if there's some other (nonperiodic) tx urb
1206 * that could use this fifo. (dma complicates it...)
1207 *
1208 * if (bulk && qh->ring.next != &musb->out_bulk), then
1209 * we have a candidate... NAKing is *NOT* an error
1210 */
1211 musb_ep_select(mbase, epnum);
1212 musb_writew(epio, MUSB_TXCSR,
1213 MUSB_TXCSR_H_WZC_BITS
1214 | MUSB_TXCSR_TXPKTRDY);
1215 goto finish;
1216 }
1217
	/* A fault was recorded above: stop any busy DMA channel, flush the
	 * FIFO, and write back TXCSR with the error bits cleared. */
1218 if (status) {
1219 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1220 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1221 (void) musb->dma_controller->channel_abort(dma);
1222 }
1223
1224 /* do the proper sequence to abort the transfer in the
1225 * usb core; the dma engine should already be stopped.
1226 */
1227 musb_h_tx_flush_fifo(hw_ep);
1228 tx_csr &= ~(MUSB_TXCSR_AUTOSET
1229 | MUSB_TXCSR_DMAENAB
1230 | MUSB_TXCSR_H_ERROR
1231 | MUSB_TXCSR_H_RXSTALL
1232 | MUSB_TXCSR_H_NAKTIMEOUT
1233 );
1234
1235 musb_ep_select(mbase, epnum);
1236 musb_writew(epio, MUSB_TXCSR, tx_csr);
1237 /* REVISIT may need to clear FLUSHFIFO ... */
1238 musb_writew(epio, MUSB_TXCSR, tx_csr);
1239 musb_writeb(epio, MUSB_TXINTERVAL, 0);
1240
1241 done = true;
1242 }
1243
1244 /* second cppi case */
1245 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1246 DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1247 goto finish;
1248
1249 }
1250
	/* Account the bytes just sent (DMA count or last PIO segment) and
	 * decide whether the urb is finished or needs another packet/ZLP. */
1251 /* REVISIT this looks wrong... */
1252 if (!status || dma || usb_pipeisoc(pipe)) {
1253 if (dma)
1254 wLength = dma->actual_len;
1255 else
1256 wLength = qh->segsize;
1257 qh->offset += wLength;
1258
1259 if (usb_pipeisoc(pipe)) {
1260 struct usb_iso_packet_descriptor *d;
1261
1262 d = urb->iso_frame_desc + qh->iso_idx;
1263 d->actual_length = qh->segsize;
1264 if (++qh->iso_idx >= urb->number_of_packets) {
1265 done = true;
1266 } else {
1267 d++;
1268 buf = urb->transfer_buffer + d->offset;
1269 wLength = d->length;
1270 }
1271 } else if (dma) {
1272 done = true;
1273 } else {
1274 /* see if we need to send more data, or ZLP */
1275 if (qh->segsize < qh->maxpacket)
1276 done = true;
1277 else if (qh->offset == urb->transfer_buffer_length
1278 && !(urb->transfer_flags
1279 & URB_ZERO_PACKET))
1280 done = true;
1281 if (!done) {
1282 buf = urb->transfer_buffer
1283 + qh->offset;
1284 wLength = urb->transfer_buffer_length
1285 - qh->offset;
1286 }
1287 }
1288 }
1289
1290 /* urb->status != -EINPROGRESS means request has been faulted,
1291 * so we must abort this transfer after cleanup
1292 */
1293 if (urb->status != -EINPROGRESS) {
1294 done = true;
1295 if (status == 0)
1296 status = urb->status;
1297 }
1298
1299 if (done) {
1300 /* set status */
1301 urb->status = status;
1302 urb->actual_length = qh->offset;
1303 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1304
1305 } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
1306 /* WARN_ON(!buf); */
1307
1308 /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
1309 * (and presumably, fifo is not half-full) we should write TWO
1310 * packets before updating TXCSR ... other docs disagree ...
1311 */
1312 /* PIO: start next packet in this URB */
	/* PIO sends at most one maxpacket-sized chunk per interrupt */
1313 wLength = min(qh->maxpacket, (u16) wLength);
1314 musb_write_fifo(hw_ep, wLength, buf);
1315 qh->segsize = wLength;
1316
1317 musb_ep_select(mbase, epnum);
1318 musb_writew(epio, MUSB_TXCSR,
1319 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1320 } else
1321 DBG(1, "not complete, but dma enabled?\n");
1322
1323finish:
1324 return;
1325}
1326
1327
1328#ifdef CONFIG_USB_INVENTRA_DMA
1329
1330/* Host side RX (IN) using Mentor DMA works as follows:
1331 submit_urb ->
1332 - if queue was empty, ProgramEndpoint
1333 - first IN token is sent out (by setting ReqPkt)
1334 LinuxIsr -> RxReady()
1335 /\ => first packet is received
1336 | - Set in mode 0 (DmaEnab, ~ReqPkt)
1337 | -> DMA Isr (transfer complete) -> RxReady()
1338 | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1339 | - if urb not complete, send next IN token (ReqPkt)
1340 | | else complete urb.
1341 | |
1342 ---------------------------
1343 *
1344 * Nuances of mode 1:
1345 * For short packets, no ack (+RxPktRdy) is sent automatically
1346 * (even if AutoClear is ON)
1347 * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1348 * automatically => major problem, as collecting the next packet becomes
1349 * difficult. Hence mode 1 is not used.
1350 *
1351 * REVISIT
1352 * All we care about at this driver level is that
1353 * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1354 * (b) termination conditions are: short RX, or buffer full;
1355 * (c) fault modes include
1356 * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1357 * (and that endpoint's dma queue stops immediately)
1358 * - overflow (full, PLUS more bytes in the terminal packet)
1359 *
1360 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1361 * thus be a great candidate for using mode 1 ... for all but the
1362 * last packet of one URB's transfer.
1363 */
1364
1365#endif
1366
1367/*
1368 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1369 * and high-bandwidth IN transfer cases.
1370 */
1371 void musb_host_rx(struct musb *musb, u8 epnum)
1372 {
	/*
	 * Service one RX interrupt for IN endpoint 'epnum': record stall /
	 * protocol-error / NAK-timeout conditions, collect the DMA byte
	 * count, and either unload the FIFO via PIO (musb_host_packet_rx)
	 * or program the next Mentor-DMA segment.  Completed/aborted urbs
	 * are handed to musb_advance_schedule() at 'finish'.
	 */
1373 struct urb *urb;
1374 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1375 void __iomem *epio = hw_ep->regs;
1376 struct musb_qh *qh = hw_ep->in_qh;
1377 size_t xfer_len;
1378 void __iomem *mbase = musb->mregs;
1379 int pipe;
1380 u16 rx_csr, val;
1381 bool iso_err = false;
1382 bool done = false;
1383 u32 status;
1384 struct dma_channel *dma;
1385
1386 musb_ep_select(mbase, epnum);
1387
1388 urb = next_urb(qh);
1389 dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1390 status = 0;
1391 xfer_len = 0;
1392
1393 rx_csr = musb_readw(epio, MUSB_RXCSR);
1394 val = rx_csr;
1395
1396 if (unlikely(!urb)) {
1397 /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1398 * usbtest #11 (unlinks) triggers it regularly, sometimes
1399 * with fifo full. (Only with DMA??)
1400 */
1401 DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1402 musb_readw(epio, MUSB_RXCOUNT),
1403 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1404 return;
1405 }
1406
1407 pipe = urb->pipe;
1408
1409 DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1410 epnum, rx_csr, urb->actual_length,
1411 dma ? dma->actual_len : 0);
1412
1413 /* check for errors, concurrent stall & unlink is not really
1414 * handled yet! */
1415 if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1416 DBG(3, "RX end %d STALL\n", epnum);
1417
1418 /* stall; record URB status */
1419 status = -EPIPE;
1420
1421 } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1422 DBG(3, "end %d RX proto error\n", epnum);
1423
1424 status = -EPROTO;
1425 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1426
1427 } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1428
1429 if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1430 /* NOTE this code path would be a good place to PAUSE a
1431 * transfer, if there's some other (nonperiodic) rx urb
1432 * that could use this fifo. (dma complicates it...)
1433 *
1434 * if (bulk && qh->ring.next != &musb->in_bulk), then
1435 * we have a candidate... NAKing is *NOT* an error
1436 */
1437 DBG(6, "RX end %d NAK timeout\n", epnum);
1438 musb_ep_select(mbase, epnum);
1439 musb_writew(epio, MUSB_RXCSR,
1440 MUSB_RXCSR_H_WZC_BITS
1441 | MUSB_RXCSR_H_REQPKT);
1442
1443 goto finish;
1444 } else {
1445 DBG(4, "RX end %d ISO data error\n", epnum);
1446 /* packet error reported later */
1447 iso_err = true;
1448 }
1449 }
1450
1451 /* faults abort the transfer */
1452 if (status) {
1453 /* clean up dma and collect transfer count */
1454 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1455 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1456 (void) musb->dma_controller->channel_abort(dma);
1457 xfer_len = dma->actual_len;
1458 }
1459 musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1460 musb_writeb(epio, MUSB_RXINTERVAL, 0);
1461 done = true;
1462 goto finish;
1463 }
1464
1465 if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1466 /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1467 ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1468 goto finish;
1469 }
1470
1471 /* thorough shutdown for now ... given more precise fault handling
1472 * and better queueing support, we might keep a DMA pipeline going
1473 * while processing this irq for earlier completions.
1474 */
1475
1476 /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1477
1478#ifndef CONFIG_USB_INVENTRA_DMA
1479 if (rx_csr & MUSB_RXCSR_H_REQPKT) {
1480 /* REVISIT this happened for a while on some short reads...
1481 * the cleanup still needs investigation... looks bad...
1482 * and also duplicates dma cleanup code above ... plus,
1483 * shouldn't this be the "half full" double buffer case?
1484 */
1485 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1486 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1487 (void) musb->dma_controller->channel_abort(dma);
1488 xfer_len = dma->actual_len;
1489 done = true;
1490 }
1491
1492 DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1493 xfer_len, dma ? ", dma" : "");
1494 rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1495
1496 musb_ep_select(mbase, epnum);
1497 musb_writew(epio, MUSB_RXCSR,
1498 MUSB_RXCSR_H_WZC_BITS | rx_csr);
1499 }
1500#endif
	/* A DMA segment just completed: take its byte count, then turn off
	 * the DMA-related CSR bits before deciding whether more IN tokens
	 * are needed. */
1501 if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1502 xfer_len = dma->actual_len;
1503
1504 val &= ~(MUSB_RXCSR_DMAENAB
1505 | MUSB_RXCSR_H_AUTOREQ
1506 | MUSB_RXCSR_AUTOCLEAR
1507 | MUSB_RXCSR_RXPKTRDY);
1508 musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1509
1510#ifdef CONFIG_USB_INVENTRA_DMA
1511 /* done if urb buffer is full or short packet is recd */
1512 done = (urb->actual_length + xfer_len >=
1513 urb->transfer_buffer_length
1514 || dma->actual_len < qh->maxpacket);
1515
1516 /* send IN token for next packet, without AUTOREQ */
1517 if (!done) {
1518 val |= MUSB_RXCSR_H_REQPKT;
1519 musb_writew(epio, MUSB_RXCSR,
1520 MUSB_RXCSR_H_WZC_BITS | val);
1521 }
1522
1523 DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1524 done ? "off" : "reset",
1525 musb_readw(epio, MUSB_RXCSR),
1526 musb_readw(epio, MUSB_RXCOUNT));
1527#else
1528 done = true;
1529#endif
1530 } else if (urb->status == -EINPROGRESS) {
1531 /* if no errors, be sure a packet is ready for unloading */
1532 if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1533 status = -EPROTO;
1534 ERR("Rx interrupt with no errors or packet!\n");
1535
1536 /* FIXME this is another "SHOULD NEVER HAPPEN" */
1537
1538/* SCRUB (RX) */
1539 /* do the proper sequence to abort the transfer */
1540 musb_ep_select(mbase, epnum);
1541 val &= ~MUSB_RXCSR_H_REQPKT;
1542 musb_writew(epio, MUSB_RXCSR, val);
1543 goto finish;
1544 }
1545
1546 /* we are expecting IN packets */
1547#ifdef CONFIG_USB_INVENTRA_DMA
1548 if (dma) {
1549 struct dma_controller *c;
1550 u16 rx_count;
1551 int ret;
1552
1553 rx_count = musb_readw(epio, MUSB_RXCOUNT);
1554
1555 DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
1556 epnum, rx_count,
1557 urb->transfer_dma
1558 + urb->actual_length,
1559 qh->offset,
1560 urb->transfer_buffer_length);
1561
1562 c = musb->dma_controller;
1563
1564 dma->desired_mode = 0;
1565#ifdef USE_MODE1
1566 /* because of the issue below, mode 1 will
1567 * only rarely behave with correct semantics.
1568 */
1569 if ((urb->transfer_flags &
1570 URB_SHORT_NOT_OK)
1571 && (urb->transfer_buffer_length -
1572 urb->actual_length)
1573 > qh->maxpacket)
1574 dma->desired_mode = 1;
1575#endif
1576
1577/* Disadvantage of using mode 1:
1578 * It's basically usable only for mass storage class; essentially all
1579 * other protocols also terminate transfers on short packets.
1580 *
1581 * Details:
1582 * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1583 * If you try to use mode 1 for (transfer_buffer_length - 512), and try
1584 * to use the extra IN token to grab the last packet using mode 0, then
1585 * the problem is that you cannot be sure when the device will send the
1586 * last packet and RxPktRdy set. Sometimes the packet is recd too soon
1587 * such that it gets lost when RxCSR is re-set at the end of the mode 1
1588 * transfer, while sometimes it is recd just a little late so that if you
1589 * try to configure for mode 0 soon after the mode 1 transfer is
1590 * completed, you will find rxcount 0. Okay, so you might think why not
1591 * wait for an interrupt when the pkt is recd. Well, you won't get any!
1592 */
1593
1594 val = musb_readw(epio, MUSB_RXCSR);
1595 val &= ~MUSB_RXCSR_H_REQPKT;
1596
1597 if (dma->desired_mode == 0)
1598 val &= ~MUSB_RXCSR_H_AUTOREQ;
1599 else
1600 val |= MUSB_RXCSR_H_AUTOREQ;
1601 val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
1602
1603 musb_writew(epio, MUSB_RXCSR,
1604 MUSB_RXCSR_H_WZC_BITS | val);
1605
1606 /* REVISIT if when actual_length != 0,
1607 * transfer_buffer_length needs to be
1608 * adjusted first...
1609 */
1610 ret = c->channel_program(
1611 dma, qh->maxpacket,
1612 dma->desired_mode,
1613 urb->transfer_dma
1614 + urb->actual_length,
1615 (dma->desired_mode == 0)
1616 ? rx_count
1617 : urb->transfer_buffer_length);
1618
	/* channel_program() failure: fall back to PIO for this
	 * packet by releasing the channel. */
1619 if (!ret) {
1620 c->channel_release(dma);
1621 hw_ep->rx_channel = NULL;
1622 dma = NULL;
1623 /* REVISIT reset CSR */
1624 }
1625 }
1626#endif /* Mentor DMA */
1627
1628 if (!dma) {
1629 done = musb_host_packet_rx(musb, urb,
1630 epnum, iso_err);
1631 DBG(6, "read %spacket\n", done ? "last " : "");
1632 }
1633 }
1634
1635 if (dma && usb_pipeisoc(pipe)) {
1636 struct usb_iso_packet_descriptor *d;
1637 int iso_stat = status;
1638
1639 d = urb->iso_frame_desc + qh->iso_idx;
1640 d->actual_length += xfer_len;
1641 if (iso_err) {
1642 iso_stat = -EILSEQ;
1643 urb->error_count++;
1644 }
1645 d->status = iso_stat;
1646 }
1647
	/* Account transferred bytes even on the abort paths, then retire
	 * the urb if this interrupt finished it. */
1648finish:
1649 urb->actual_length += xfer_len;
1650 qh->offset += xfer_len;
1651 if (done) {
1652 if (urb->status == -EINPROGRESS)
1653 urb->status = status;
1654 musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1655 }
1656}
1657
1658/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1659 * the software schedule associates multiple such nodes with a given
1660 * host side hardware endpoint + direction; scheduling may activate
1661 * that hardware endpoint.
1662 */
1663static int musb_schedule(
1664 struct musb *musb,
1665 struct musb_qh *qh,
1666 int is_in)
1667{
1668 int idle;
1669 int best_diff;
1670 int best_end, epnum;
1671 struct musb_hw_ep *hw_ep = NULL;
1672 struct list_head *head = NULL;
1673
1674 /* use fixed hardware for control and bulk */
1675 switch (qh->type) {
1676 case USB_ENDPOINT_XFER_CONTROL:
1677 head = &musb->control;
1678 hw_ep = musb->control_ep;
1679 break;
1680 case USB_ENDPOINT_XFER_BULK:
1681 hw_ep = musb->bulk_ep;
1682 if (is_in)
1683 head = &musb->in_bulk;
1684 else
1685 head = &musb->out_bulk;
1686 break;
1687 }
1688 if (head) {
1689 idle = list_empty(head);
1690 list_add_tail(&qh->ring, head);
1691 goto success;
1692 }
1693
1694 /* else, periodic transfers get muxed to other endpoints */
1695
1696 /* FIXME this doesn't consider direction, so it can only
1697 * work for one half of the endpoint hardware, and assumes
1698 * the previous cases handled all non-shared endpoints...
1699 */
1700
1701 /* we know this qh hasn't been scheduled, so all we need to do
1702 * is choose which hardware endpoint to put it on ...
1703 *
1704 * REVISIT what we really want here is a regular schedule tree
1705 * like e.g. OHCI uses, but for now musb->periodic is just an
1706 * array of the _single_ logical endpoint associated with a
1707 * given physical one (identity mapping logical->physical).
1708 *
1709 * that simplistic approach makes TT scheduling a lot simpler;
1710 * there is none, and thus none of its complexity...
1711 */
1712 best_diff = 4096;
1713 best_end = -1;
1714
1715 for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
1716 int diff;
1717
1718 if (musb->periodic[epnum])
1719 continue;
1720 hw_ep = &musb->endpoints[epnum];
1721 if (hw_ep == musb->bulk_ep)
1722 continue;
1723
1724 if (is_in)
1725 diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
1726 else
1727 diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
1728
1729 if (diff > 0 && best_diff > diff) {
1730 best_diff = diff;
1731 best_end = epnum;
1732 }
1733 }
1734 if (best_end < 0)
1735 return -ENOSPC;
1736
1737 idle = 1;
1738 hw_ep = musb->endpoints + best_end;
1739 musb->periodic[best_end] = qh;
1740 DBG(4, "qh %p periodic slot %d\n", qh, best_end);
1741success:
1742 qh->hw_ep = hw_ep;
1743 qh->hep->hcpriv = qh;
1744 if (idle)
1745 musb_start_urb(musb, is_in, qh);
1746 return 0;
1747}
1748
1749 static int musb_urb_enqueue(
1750 struct usb_hcd *hcd,
1751 struct urb *urb,
1752 gfp_t mem_flags)
1753 {
	/*
	 * usbcore submit entry point: link the urb onto its endpoint, and
	 * if the endpoint has no live qh yet, allocate one, precompute the
	 * type/interval/address register values, and hand it to
	 * musb_schedule().  Returns 0 or a negative errno.
	 */
1754 unsigned long flags;
1755 struct musb *musb = hcd_to_musb(hcd);
1756 struct usb_host_endpoint *hep = urb->ep;
1757 struct musb_qh *qh = hep->hcpriv;
1758 struct usb_endpoint_descriptor *epd = &hep->desc;
1759 int ret;
1760 unsigned type_reg;
1761 unsigned interval;
1762
1763 /* host role must be active */
1764 if (!is_host_active(musb) || !musb->is_active)
1765 return -ENODEV;
1766
1767 spin_lock_irqsave(&musb->lock, flags);
1768 ret = usb_hcd_link_urb_to_ep(hcd, urb);
1769 spin_unlock_irqrestore(&musb->lock, flags);
1770 if (ret)
1771 return ret;
1772
1773 /* DMA mapping was already done, if needed, and this urb is on
1774 * hep->urb_list ... so there's little to do unless hep wasn't
1775 * yet scheduled onto a live qh.
1776 *
1777 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1778 * disabled, testing for empty qh->ring and avoiding qh setup costs
1779 * except for the first urb queued after a config change.
1780 */
1781 if (qh) {
1782 urb->hcpriv = qh;
1783 return 0;
1784 }
1785
1786 /* Allocate and initialize qh, minimizing the work done each time
1787 * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
1788 *
1789 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1790 * for bugs in other kernel code to break this driver...
1791 */
1792 qh = kzalloc(sizeof *qh, mem_flags);
1793 if (!qh) {
1794 usb_hcd_unlink_urb_from_ep(hcd, urb);
1795 return -ENOMEM;
1796 }
1797
1798 qh->hep = hep;
1799 qh->dev = urb->dev;
1800 INIT_LIST_HEAD(&qh->ring);
1801 qh->is_ready = 1;
1802
1803 qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1804
1805 /* no high bandwidth support yet */
1806 if (qh->maxpacket & ~0x7ff) {
1807 ret = -EMSGSIZE;
1808 goto done;
1809 }
1810
1811 qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1812 qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
1813
1814 /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1815 qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1816
1817 /* precompute rxtype/txtype/type0 register */
1818 type_reg = (qh->type << 4) | qh->epnum;
1819 switch (urb->dev->speed) {
1820 case USB_SPEED_LOW:
1821 type_reg |= 0xc0;
1822 break;
1823 case USB_SPEED_FULL:
1824 type_reg |= 0x80;
1825 break;
1826 default:
	/* high speed (and anything else) */
1827 type_reg |= 0x40;
1828 }
1829 qh->type_reg = type_reg;
1830
1831 /* precompute rxinterval/txinterval register */
1832 interval = min((u8)16, epd->bInterval); /* log encoding */
1833 switch (qh->type) {
1834 case USB_ENDPOINT_XFER_INT:
1835 /* fullspeed uses linear encoding */
1836 if (USB_SPEED_FULL == urb->dev->speed) {
1837 interval = epd->bInterval;
1838 if (!interval)
1839 interval = 1;
1840 }
1841 /* FALLTHROUGH */
1842 case USB_ENDPOINT_XFER_ISOC:
1843 /* iso always uses log encoding */
1844 break;
1845 default:
1846 /* REVISIT we actually want to use NAK limits, hinting to the
1847 * transfer scheduling logic to try some other qh, e.g. try
1848 * for 2 msec first:
1849 *
1850 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
1851 *
1852 * The downside of disabling this is that transfer scheduling
1853 * gets VERY unfair for nonperiodic transfers; a misbehaving
1854 * peripheral could make that hurt. Or for reads, one that's
1855 * perfectly normal: network and other drivers keep reads
1856 * posted at all times, having one pending for a week should
1857 * be perfectly safe.
1858 *
1859 * The upside of disabling it is avoiding transfer scheduling
1860 * code to put this aside for while.
1861 */
1862 interval = 0;
1863 }
1864 qh->intv_reg = interval;
1865
1866 /* precompute addressing for external hub/tt ports */
1867 if (musb->is_multipoint) {
1868 struct usb_device *parent = urb->dev->parent;
1869
1870 if (parent != hcd->self.root_hub) {
1871 qh->h_addr_reg = (u8) parent->devnum;
1872
1873 /* set up tt info if needed */
1874 if (urb->dev->tt) {
1875 qh->h_port_reg = (u8) urb->dev->ttport;
1876 qh->h_addr_reg |= 0x80;
1877 }
1878 }
1879 }
1880
1881 /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
1882 * until we get real dma queues (with an entry for each urb/buffer),
1883 * we only have work to do in the former case.
1884 */
1885 spin_lock_irqsave(&musb->lock, flags);
1886 if (hep->hcpriv) {
1887 /* some concurrent activity submitted another urb to hep...
1888 * odd, rare, error prone, but legal.
1889 */
1890 kfree(qh);
1891 ret = 0;
1892 } else
1893 ret = musb_schedule(musb, qh,
1894 epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1895
1896 if (ret == 0) {
1897 urb->hcpriv = qh;
1898 /* FIXME set urb->start_frame for iso/intr, it's tested in
1899 * musb_start_urb(), but otherwise only konicawc cares ...
1900 */
1901 }
1902 spin_unlock_irqrestore(&musb->lock, flags);
1903
	/* error unwind: undo the urb link and drop the unscheduled qh */
1904done:
1905 if (ret != 0) {
1906 usb_hcd_unlink_urb_from_ep(hcd, urb);
1907 kfree(qh);
1908 }
1909 return ret;
1910}
1911
1912
1913/*
1914 * abort a transfer that's at the head of a hardware queue.
1915 * called with controller locked, irqs blocked
1916 * that hardware queue advances to the next transfer, unless prevented
1917 */
1918 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1919 {
	/*
	 * Abort the transfer at the head of this endpoint's hardware queue:
	 * stop any DMA (crediting its byte count to the urb), flush the
	 * FIFO, clear the error/ready CSR bits, and — unless the DMA abort
	 * itself reported failure — advance to the next queued transfer.
	 * Caller holds the controller lock with irqs blocked.
	 */
1920 struct musb_hw_ep *ep = qh->hw_ep;
1921 void __iomem *epio = ep->regs;
1922 unsigned hw_end = ep->epnum;
1923 void __iomem *regs = ep->musb->mregs;
1924 u16 csr;
1925 int status = 0;
1926
1927 musb_ep_select(regs, hw_end);
1928
1929 if (is_dma_capable()) {
1930 struct dma_channel *dma;
1931
1932 dma = is_in ? ep->rx_channel : ep->tx_channel;
1933 if (dma) {
1934 status = ep->musb->dma_controller->channel_abort(dma);
1935 DBG(status ? 1 : 3,
1936 "abort %cX%d DMA for urb %p --> %d\n",
1937 is_in ? 'R' : 'T', ep->epnum,
1938 urb, status);
1939 urb->actual_length += dma->actual_len;
1940 }
1941 }
1942
1943 /* turn off DMA requests, discard state, stop polling ... */
1944 if (is_in) {
1945 /* giveback saves bulk toggle */
1946 csr = musb_h_flush_rxfifo(ep, 0);
1947
1948 /* REVISIT we still get an irq; should likely clear the
1949 * endpoint's irq status here to avoid bogus irqs.
1950 * clearing that status is platform-specific...
1951 */
1952 } else {
1953 musb_h_tx_flush_fifo(ep);
1954 csr = musb_readw(epio, MUSB_TXCSR);
1955 csr &= ~(MUSB_TXCSR_AUTOSET
1956 | MUSB_TXCSR_DMAENAB
1957 | MUSB_TXCSR_H_RXSTALL
1958 | MUSB_TXCSR_H_NAKTIMEOUT
1959 | MUSB_TXCSR_H_ERROR
1960 | MUSB_TXCSR_TXPKTRDY);
1961 musb_writew(epio, MUSB_TXCSR, csr);
1962 /* REVISIT may need to clear FLUSHFIFO ... */
1963 musb_writew(epio, MUSB_TXCSR, csr);
1964 /* flush cpu writebuffer */
1965 csr = musb_readw(epio, MUSB_TXCSR);
1966 }
1967 if (status == 0)
1968 musb_advance_schedule(ep->musb, urb, ep, is_in);
1969 return status;
1970}
1971
1972 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1973 {
	/*
	 * usbcore unlink entry point: an urb that is not actively programmed
	 * into the hardware is given back immediately; the one at the head
	 * of a hardware queue is aborted synchronously via
	 * musb_cleanup_urb().  Runs under musb->lock.
	 */
1974 struct musb *musb = hcd_to_musb(hcd);
1975 struct musb_qh *qh;
1976 struct list_head *sched;
1977 unsigned long flags;
1978 int ret;
1979
1980 DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
1981 usb_pipedevice(urb->pipe),
1982 usb_pipeendpoint(urb->pipe),
1983 usb_pipein(urb->pipe) ? "in" : "out");
1984
1985 spin_lock_irqsave(&musb->lock, flags);
1986 ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1987 if (ret)
1988 goto done;
1989
1990 qh = urb->hcpriv;
1991 if (!qh)
1992 goto done;
1993
1994 /* Any URB not actively programmed into endpoint hardware can be
1995 * immediately given back. Such an URB must be at the head of its
1996 * endpoint queue, unless someday we get real DMA queues. And even
1997 * then, it might not be known to the hardware...
1998 *
1999 * Otherwise abort current transfer, pending dma, etc.; urb->status
2000 * has already been updated. This is a synchronous abort; it'd be
2001 * OK to hold off until after some IRQ, though.
2002 */
2003 if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2004 ret = -EINPROGRESS;
2005 else {
2006 switch (qh->type) {
2007 case USB_ENDPOINT_XFER_CONTROL:
2008 sched = &musb->control;
2009 break;
2010 case USB_ENDPOINT_XFER_BULK:
2011 if (usb_pipein(urb->pipe))
2012 sched = &musb->in_bulk;
2013 else
2014 sched = &musb->out_bulk;
2015 break;
2016 default:
2017 /* REVISIT when we get a schedule tree, periodic
2018 * transfers won't always be at the head of a
2019 * singleton queue...
2020 */
2021 sched = NULL;
2022 break;
2023 }
2024 }
2025
2026 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
	/* 'sched' is only read when ret == 0, i.e. after the else branch
	 * above assigned it — the || short-circuits when ret < 0. */
2027 if (ret < 0 || (sched && qh != first_qh(sched))) {
2028 int ready = qh->is_ready;
2029
2030 ret = 0;
	/* clear is_ready so giveback doesn't restart this endpoint */
2031 qh->is_ready = 0;
2032 __musb_giveback(musb, urb, 0);
2033 qh->is_ready = ready;
2034 } else
2035 ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2036done:
2037 spin_unlock_irqrestore(&musb->lock, flags);
2038 return ret;
2039}
2040
2041/* disable an endpoint */
2042 static void
2043 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2044 {
	/*
	 * usbcore endpoint-disable hook: abort whatever transfer this
	 * endpoint has on the hardware, then give back every remaining
	 * queued urb with -ESHUTDOWN.
	 */
2045 u8 epnum = hep->desc.bEndpointAddress;
2046 unsigned long flags;
2047 struct musb *musb = hcd_to_musb(hcd);
2048 u8 is_in = epnum & USB_DIR_IN;
2049 struct musb_qh *qh = hep->hcpriv;
2050 struct urb *urb, *tmp;
2051 struct list_head *sched;
2052
2053 if (!qh)
2054 return;
2055
2056 spin_lock_irqsave(&musb->lock, flags);
2057
2058 switch (qh->type) {
2059 case USB_ENDPOINT_XFER_CONTROL:
2060 sched = &musb->control;
2061 break;
2062 case USB_ENDPOINT_XFER_BULK:
2063 if (is_in)
2064 sched = &musb->in_bulk;
2065 else
2066 sched = &musb->out_bulk;
2067 break;
2068 default:
2069 /* REVISIT when we get a schedule tree, periodic transfers
2070 * won't always be at the head of a singleton queue...
2071 */
2072 sched = NULL;
2073 break;
2074 }
2075
2076 /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2077
2078 /* kick first urb off the hardware, if needed */
2079 qh->is_ready = 0;
2080 if (!sched || qh == first_qh(sched)) {
2081 urb = next_urb(qh);
2082
	/* NOTE(review): next_urb() is assumed non-NULL here, relying on
	 * the "qh valid => urb_list non-empty" invariant above; if that
	 * invariant can be violated, the dereference below would fault —
	 * confirm against next_urb()'s definition. */
2083 /* make software (then hardware) stop ASAP */
2084 if (!urb->unlinked)
2085 urb->status = -ESHUTDOWN;
2086
2087 /* cleanup */
2088 musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2089 } else
2090 urb = NULL;
2091
2092 /* then just nuke all the others */
2093 list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
2094 musb_giveback(qh, urb, -ESHUTDOWN);
2095
2096 spin_unlock_irqrestore(&musb->lock, flags);
2097 }
2098
2099static int musb_h_get_frame_number(struct usb_hcd *hcd)
2100{
2101 struct musb *musb = hcd_to_musb(hcd);
2102
2103 return musb_readw(musb->mregs, MUSB_FRAME);
2104}
2105
2106static int musb_h_start(struct usb_hcd *hcd)
2107{
2108 struct musb *musb = hcd_to_musb(hcd);
2109
2110 /* NOTE: musb_start() is called when the hub driver turns
2111 * on port power, or when (OTG) peripheral starts.
2112 */
2113 hcd->state = HC_STATE_RUNNING;
2114 musb->port1_status = 0;
2115 return 0;
2116}
2117
2118static void musb_h_stop(struct usb_hcd *hcd)
2119{
2120 musb_stop(hcd_to_musb(hcd));
2121 hcd->state = HC_STATE_HALT;
2122}
2123
2124static int musb_bus_suspend(struct usb_hcd *hcd)
2125{
2126 struct musb *musb = hcd_to_musb(hcd);
2127
2128 if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
2129 return 0;
2130
2131 if (is_host_active(musb) && musb->is_active) {
2132 WARNING("trying to suspend as %s is_active=%i\n",
2133 otg_state_string(musb), musb->is_active);
2134 return -EBUSY;
2135 } else
2136 return 0;
2137}
2138
2139 static int musb_bus_resume(struct usb_hcd *hcd)
2140 {
2141 /* resuming child port does the work */
	/* no controller-level action needed here; resume is presumably
	 * driven through the hub-control path — confirm with
	 * musb_hub_control() */
2142 return 0;
2143 }
2144
/* HCD operations table: the entry points usbcore invokes on this host. */
2145 const struct hc_driver musb_hc_driver = {
2146 .description = "musb-hcd",
2147 .product_desc = "MUSB HDRC host driver",
	/* each usb_hcd carries a struct musb in its hcd_priv area */
2148 .hcd_priv_size = sizeof(struct musb),
2149 .flags = HCD_USB2 | HCD_MEMORY,
2150
2151 /* not using irq handler or reset hooks from usbcore, since
2152 * those must be shared with peripheral code for OTG configs
2153 */
2154
2155 .start = musb_h_start,
2156 .stop = musb_h_stop,
2157
2158 .get_frame_number = musb_h_get_frame_number,
2159
2160 .urb_enqueue = musb_urb_enqueue,
2161 .urb_dequeue = musb_urb_dequeue,
2162 .endpoint_disable = musb_h_disable,
2163
2164 .hub_status_data = musb_hub_status_data,
2165 .hub_control = musb_hub_control,
2166 .bus_suspend = musb_bus_suspend,
2167 .bus_resume = musb_bus_resume,
2168 /* .start_port_reset = NULL, */
2169 /* .hub_irq_enable = NULL, */
2170 };
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 000000000000..77bcdb9d5b32
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
1/*
2 * MUSB OTG driver host defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef _MUSB_HOST_H
36#define _MUSB_HOST_H
37
38static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
39{
40 return container_of((void *) musb, struct usb_hcd, hcd_priv);
41}
42
43static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
44{
45 return (struct musb *) (hcd->hcd_priv);
46}
47
/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
/* Host-side per-endpoint scheduling state: lives on a control/bulk ring
 * or in a periodic slot, and caches the register values programmed into
 * the hardware endpoint it is currently bound to.
 */
struct musb_qh {
	struct usb_host_endpoint *hep;		/* usbcore info */
	struct usb_device	*dev;
	struct musb_hw_ep	*hw_ep;		/* current binding */

	struct list_head	ring;		/* of musb_qh */
	/* struct musb_qh	*next; */	/* for periodic tree */

	unsigned		offset;		/* in urb->transfer_buffer */
	unsigned		segsize;	/* current xfer fragment */

	u8			type_reg;	/* {rx,tx} type register */
	u8			intv_reg;	/* {rx,tx} interval register */
	u8			addr_reg;	/* device address register */
	u8			h_addr_reg;	/* hub address register */
	u8			h_port_reg;	/* hub port register */

	u8			is_ready;	/* safe to modify hw_ep */
	u8			type;		/* XFERTYPE_* */
	u8			epnum;
	u16			maxpacket;
	u16			frame;		/* for periodic schedule */
	unsigned		iso_idx;	/* in urb->iso_frame_desc[] */
};
73
74/* map from control or bulk queue head to the first qh on that ring */
75static inline struct musb_qh *first_qh(struct list_head *q)
76{
77 if (list_empty(q))
78 return NULL;
79 return list_entry(q->next, struct musb_qh, ring);
80}
81
82
83extern void musb_root_disconnect(struct musb *musb);
84
85struct usb_hcd;
86
87extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
88extern int musb_hub_control(struct usb_hcd *hcd,
89 u16 typeReq, u16 wValue, u16 wIndex,
90 char *buf, u16 wLength);
91
92extern const struct hc_driver musb_hc_driver;
93
94static inline struct urb *next_urb(struct musb_qh *qh)
95{
96#ifdef CONFIG_USB_MUSB_HDRC_HCD
97 struct list_head *queue;
98
99 if (!qh)
100 return NULL;
101 queue = &qh->hep->urb_list;
102 if (list_empty(queue))
103 return NULL;
104 return list_entry(queue->next, struct urb, urb_list);
105#else
106 return NULL;
107#endif
108}
109
110#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 000000000000..6bbedae83af8
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
1/*
2 * MUSB OTG driver register I/O
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
36#define __MUSB_LINUX_PLATFORM_ARCH_H__
37
38#include <linux/io.h>
39
#ifndef CONFIG_ARM
/* Fallbacks for platforms that lack the string MMIO accessors: route
 * them through the port-I/O string helpers.  NOTE(review): this reuses
 * the __iomem cookie as a port number, which assumes ins*()/outs*()
 * accept it on every non-ARM platform building this driver -- confirm
 * per-arch before extending.
 */
static inline void readsl(const void __iomem *addr, void *buf, int len)
	{ insl((unsigned long)addr, buf, len); }
static inline void readsw(const void __iomem *addr, void *buf, int len)
	{ insw((unsigned long)addr, buf, len); }
static inline void readsb(const void __iomem *addr, void *buf, int len)
	{ insb((unsigned long)addr, buf, len); }

static inline void writesl(const void __iomem *addr, const void *buf, int len)
	{ outsl((unsigned long)addr, buf, len); }
static inline void writesw(const void __iomem *addr, const void *buf, int len)
	{ outsw((unsigned long)addr, buf, len); }
static inline void writesb(const void __iomem *addr, const void *buf, int len)
	{ outsb((unsigned long)addr, buf, len); }

#endif
56
/* NOTE: these offsets are all in bytes */

/* 16-bit register read at addr + offset */
static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
	{ return __raw_readw(addr + offset); }

/* 32-bit register read at addr + offset */
static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
	{ return __raw_readl(addr + offset); }


/* 16-bit register write at addr + offset */
static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
	{ __raw_writew(data, addr + offset); }

/* 32-bit register write at addr + offset */
static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
	{ __raw_writel(data, addr + offset); }
71
72
#ifdef CONFIG_USB_TUSB6010

/*
 * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
 */
/* Emulate an 8-bit read by fetching the aligned 16-bit word and
 * extracting the requested byte (high byte for odd offsets).
 */
static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
{
	u16 tmp;
	u8 val;

	tmp = __raw_readw(addr + (offset & ~1));
	if (offset & 1)
		val = (tmp >> 8);
	else
		val = tmp & 0xff;

	return val;
}

/* Emulate an 8-bit write with a 16-bit read-modify-write of the aligned
 * word.  NOTE(review): not atomic -- assumes callers serialize access to
 * adjacent register bytes.
 */
static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	u16 tmp;

	tmp = __raw_readw(addr + (offset & ~1));
	if (offset & 1)
		tmp = (data << 8) | (tmp & 0xff);
	else
		tmp = (tmp & 0xff00) | data;

	__raw_writew(tmp, addr + (offset & ~1));
}

#else

/* 8-bit register read at addr + offset */
static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
	{ return __raw_readb(addr + offset); }

/* 8-bit register write at addr + offset */
static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
	{ __raw_writeb(data, addr + offset); }

#endif	/* CONFIG_USB_TUSB6010 */
114
115#endif
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
new file mode 100644
index 000000000000..55e6b78bdccc
--- /dev/null
+++ b/drivers/usb/musb/musb_procfs.c
@@ -0,0 +1,830 @@
1/*
2 * MUSB OTG driver debug support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/kernel.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/uaccess.h> /* FIXME remove procfs writes */
39#include <asm/arch/hardware.h>
40
41#include "musb_core.h"
42
43#include "davinci.h"
44
45#ifdef CONFIG_USB_MUSB_HDRC_HCD
46
47static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
48{
49 int count;
50 int tmp;
51 struct usb_host_endpoint *hep = qh->hep;
52 struct urb *urb;
53
54 count = snprintf(buf, max, " qh %p dev%d ep%d%s max%d\n",
55 qh, qh->dev->devnum, qh->epnum,
56 ({ char *s; switch (qh->type) {
57 case USB_ENDPOINT_XFER_BULK:
58 s = "-bulk"; break;
59 case USB_ENDPOINT_XFER_INT:
60 s = "-int"; break;
61 case USB_ENDPOINT_XFER_CONTROL:
62 s = ""; break;
63 default:
64 s = "iso"; break;
65 }; s; }),
66 qh->maxpacket);
67 if (count <= 0)
68 return 0;
69 buf += count;
70 max -= count;
71
72 list_for_each_entry(urb, &hep->urb_list, urb_list) {
73 tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
74 usb_pipein(urb->pipe) ? "in" : "out",
75 urb, urb->actual_length,
76 urb->transfer_buffer_length);
77 if (tmp <= 0)
78 break;
79 tmp = min(tmp, (int)max);
80 count += tmp;
81 buf += tmp;
82 max -= tmp;
83 }
84 return count;
85}
86
87static int
88dump_queue(struct list_head *q, char *buf, unsigned max)
89{
90 int count = 0;
91 struct musb_qh *qh;
92
93 list_for_each_entry(qh, q, ring) {
94 int tmp;
95
96 tmp = dump_qh(qh, buf, max);
97 if (tmp <= 0)
98 break;
99 tmp = min(tmp, (int)max);
100 count += tmp;
101 buf += tmp;
102 max -= tmp;
103 }
104 return count;
105}
106
107#endif /* HCD */
108
109#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* Dump one gadget-side endpoint (csr/maxp state, optional CPPI DMA state
 * ram, and the pending request queue) into buffer, at most max bytes.
 * Returns the number of characters written.  Caller holds musb->lock.
 */
static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
{
	char *buf = buffer;
	int code = 0;
	void __iomem *regs = ep->hw_ep->regs;
	char *mode = "1buf";

	/* report double buffering for whichever direction this ep uses */
	if (ep->is_in) {
		if (ep->hw_ep->tx_double_buffered)
			mode = "2buf";
	} else {
		if (ep->hw_ep->rx_double_buffered)
			mode = "2buf";
	}

	/* do/while(0) so any formatting failure can break out early */
	do {
		struct usb_request *req;

		code = snprintf(buf, max,
				"\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
				ep->name, ep->current_epnum,
				mode, ep->dma ? " dma" : "",
				musb_readw(regs,
					(ep->is_in || !ep->current_epnum)
						? MUSB_TXCSR
						: MUSB_RXCSR),
				musb_readw(regs, ep->is_in
						? MUSB_TXMAXP
						: MUSB_RXMAXP)
				);
		if (code <= 0)
			break;
		/* clamp the would-be length to the space available */
		code = min(code, (int) max);
		buf += code;
		max -= code;

		/* CPPI DMA state ram (DaVinci), if this ep has a channel */
		if (is_cppi_enabled() && ep->current_epnum) {
			unsigned cppi = ep->current_epnum - 1;
			void __iomem *base = ep->musb->ctrl_base;
			unsigned off1 = cppi << 2;
			void __iomem *ram = base;
			char tmp[16];

			if (ep->is_in) {
				ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
				tmp[0] = 0;
			} else {
				ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
				snprintf(tmp, sizeof tmp, "%d left, ",
					musb_readl(base,
						DAVINCI_RXCPPI_BUFCNT0_REG + off1));
			}

			code = snprintf(buf, max, "%cX DMA%d: %s"
					"%08x %08x, %08x %08x; "
					"%08x %08x %08x .. %08x\n",
					ep->is_in ? 'T' : 'R',
					ep->current_epnum - 1, tmp,
					musb_readl(ram, 0 * 4),
					musb_readl(ram, 1 * 4),
					musb_readl(ram, 2 * 4),
					musb_readl(ram, 3 * 4),
					musb_readl(ram, 4 * 4),
					musb_readl(ram, 5 * 4),
					musb_readl(ram, 6 * 4),
					musb_readl(ram, 7 * 4));
			if (code <= 0)
				break;
			code = min(code, (int) max);
			buf += code;
			max -= code;
		}

		if (list_empty(&ep->req_list)) {
			code = snprintf(buf, max, "\t(queue empty)\n");
			if (code <= 0)
				break;
			code = min(code, (int) max);
			buf += code;
			max -= code;
			break;
		}
		/* one line per queued request */
		list_for_each_entry(req, &ep->req_list, list) {
			code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
					req,
					req->zero ? "zero, " : "",
					req->short_not_ok ? "!short, " : "",
					req->actual, req->length);
			if (code <= 0)
				break;
			code = min(code, (int) max);
			buf += code;
			max -= code;
		}
	} while (0);
	return buf - buffer;
}
207#endif
208
/* Dump one hardware endpoint's state: the host-side queues and register
 * shadow when the host role is active, and/or the gadget endpoints when
 * the peripheral role is active.  Writes at most max bytes into aBuffer
 * and returns the number written.  Caller holds musb->lock.
 */
static int
dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
{
	int code = 0;
	char *buf = aBuffer;
	struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];

	/* do/while(0) so any formatting failure can break out early */
	do {
		/* indexed endpoint registers below refer to epnum */
		musb_ep_select(musb->mregs, epnum);
#ifdef CONFIG_USB_MUSB_HDRC_HCD
		if (is_host_active(musb)) {
			int dump_rx, dump_tx;
			void __iomem *regs = hw_ep->regs;

			/* TEMPORARY (!) until we have a real periodic
			 * schedule tree ...
			 */
			if (!epnum) {
				/* control is shared, uses RX queue
				 * but (mostly) shadowed tx registers
				 */
				dump_tx = !list_empty(&musb->control);
				dump_rx = 0;
			} else if (hw_ep == musb->bulk_ep) {
				dump_tx = !list_empty(&musb->out_bulk);
				dump_rx = !list_empty(&musb->in_bulk);
			} else if (musb->periodic[epnum]) {
				struct usb_host_endpoint *hep;

				hep = musb->periodic[epnum]->hep;
				dump_rx = hep->desc.bEndpointAddress
						& USB_ENDPOINT_DIR_MASK;
				dump_tx = !dump_rx;
			} else
				break;
			/* END TEMPORARY */


			if (dump_rx) {
				code = snprintf(buf, max,
					"\nRX%d: %s rxcsr %04x interval %02x "
					"max %04x type %02x; "
					"dev %d hub %d port %d"
					"\n",
					epnum,
					hw_ep->rx_double_buffered
						? "2buf" : "1buf",
					musb_readw(regs, MUSB_RXCSR),
					musb_readb(regs, MUSB_RXINTERVAL),
					musb_readw(regs, MUSB_RXMAXP),
					musb_readb(regs, MUSB_RXTYPE),
					/* FIXME: assumes multipoint */
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_RXFUNCADDR)),
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_RXHUBADDR)),
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_RXHUBPORT))
					);
				if (code <= 0)
					break;
				/* clamp would-be length to space left */
				code = min(code, (int) max);
				buf += code;
				max -= code;

				/* CPPI RX DMA state ram, if channel in use */
				if (is_cppi_enabled()
						&& epnum
						&& hw_ep->rx_channel) {
					unsigned cppi = epnum - 1;
					unsigned off1 = cppi << 2;
					void __iomem *base;
					void __iomem *ram;
					char tmp[16];

					base = musb->ctrl_base;
					ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
							cppi) + base;
					snprintf(tmp, sizeof tmp, "%d left, ",
						musb_readl(base,
							DAVINCI_RXCPPI_BUFCNT0_REG
								+ off1));

					code = snprintf(buf, max,
						"    rx dma%d: %s"
						"%08x %08x, %08x %08x; "
						"%08x %08x %08x .. %08x\n",
						cppi, tmp,
						musb_readl(ram, 0 * 4),
						musb_readl(ram, 1 * 4),
						musb_readl(ram, 2 * 4),
						musb_readl(ram, 3 * 4),
						musb_readl(ram, 4 * 4),
						musb_readl(ram, 5 * 4),
						musb_readl(ram, 6 * 4),
						musb_readl(ram, 7 * 4));
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				}

				/* then the queue feeding this direction */
				if (hw_ep == musb->bulk_ep
						&& !list_empty(
							&musb->in_bulk)) {
					code = dump_queue(&musb->in_bulk,
							buf, max);
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				} else if (musb->periodic[epnum]) {
					code = dump_qh(musb->periodic[epnum],
							buf, max);
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				}
			}

			if (dump_tx) {
				code = snprintf(buf, max,
					"\nTX%d: %s txcsr %04x interval %02x "
					"max %04x type %02x; "
					"dev %d hub %d port %d"
					"\n",
					epnum,
					hw_ep->tx_double_buffered
						? "2buf" : "1buf",
					musb_readw(regs, MUSB_TXCSR),
					musb_readb(regs, MUSB_TXINTERVAL),
					musb_readw(regs, MUSB_TXMAXP),
					musb_readb(regs, MUSB_TXTYPE),
					/* FIXME: assumes multipoint */
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_TXFUNCADDR)),
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_TXHUBADDR)),
					musb_readb(musb->mregs,
						MUSB_BUSCTL_OFFSET(epnum,
						MUSB_TXHUBPORT))
					);
				if (code <= 0)
					break;
				code = min(code, (int) max);
				buf += code;
				max -= code;

				/* CPPI TX DMA state ram, if channel in use */
				if (is_cppi_enabled()
						&& epnum
						&& hw_ep->tx_channel) {
					unsigned cppi = epnum - 1;
					void __iomem *base;
					void __iomem *ram;

					base = musb->ctrl_base;
					ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
							cppi) + base;
					code = snprintf(buf, max,
						"    tx dma%d: "
						"%08x %08x, %08x %08x; "
						"%08x %08x %08x .. %08x\n",
						cppi,
						musb_readl(ram, 0 * 4),
						musb_readl(ram, 1 * 4),
						musb_readl(ram, 2 * 4),
						musb_readl(ram, 3 * 4),
						musb_readl(ram, 4 * 4),
						musb_readl(ram, 5 * 4),
						musb_readl(ram, 6 * 4),
						musb_readl(ram, 7 * 4));
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				}

				/* then whichever queue feeds TX on this ep */
				if (hw_ep == musb->control_ep
						&& !list_empty(
							&musb->control)) {
					code = dump_queue(&musb->control,
							buf, max);
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				} else if (hw_ep == musb->bulk_ep
						&& !list_empty(
							&musb->out_bulk)) {
					code = dump_queue(&musb->out_bulk,
							buf, max);
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				} else if (musb->periodic[epnum]) {
					code = dump_qh(musb->periodic[epnum],
							buf, max);
					if (code <= 0)
						break;
					code = min(code, (int) max);
					buf += code;
					max -= code;
				}
			}
		}
#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
		if (is_peripheral_active(musb)) {
			code = 0;

			/* ep0 is always dumped; others only when in use */
			if (hw_ep->ep_in.desc || !epnum) {
				code = dump_ep(&hw_ep->ep_in, buf, max);
				if (code <= 0)
					break;
				code = min(code, (int) max);
				buf += code;
				max -= code;
			}
			if (hw_ep->ep_out.desc) {
				code = dump_ep(&hw_ep->ep_out, buf, max);
				if (code <= 0)
					break;
				code = min(code, (int) max);
				buf += code;
				max -= code;
			}
		}
#endif
	} while (0);

	return buf - aBuffer;
}
453
/* Dump the current status and compile options.
 * @param musb the device driver instance
 * @param buffer where to dump the status; it must be big enough to hold the
 * result otherwise "BAD THINGS HAPPENS(TM)".
 *
 * NOTE(review): uses unbounded sprintf throughout -- assumes the dump
 * always fits in the caller's (page-sized) buffer.
 */
static int dump_header_stats(struct musb *musb, char *buffer)
{
	int code, count = 0;
	const void __iomem *mbase = musb->mregs;

	*buffer = 0;
	count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
			"(Power=%02x, DevCtl=%02x)\n",
			(musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
			musb_readb(mbase, MUSB_POWER),
			musb_readb(mbase, MUSB_DEVCTL));
	if (count <= 0)
		return 0;
	buffer += count;

	code = sprintf(buffer, "OTG state: %s; %sactive\n",
			otg_state_string(musb),
			musb->is_active ? "" : "in");
	if (code <= 0)
		goto done;
	buffer += code;
	count += code;

	/* compile-time DMA and role configuration */
	code = sprintf(buffer,
			"Options: "
#ifdef CONFIG_MUSB_PIO_ONLY
			"pio"
#elif defined(CONFIG_USB_TI_CPPI_DMA)
			"cppi-dma"
#elif defined(CONFIG_USB_INVENTRA_DMA)
			"musb-dma"
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
			"tusb-omap-dma"
#else
			"?dma?"
#endif
			", "
#ifdef CONFIG_USB_MUSB_OTG
			"otg (peripheral+host)"
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
			"peripheral"
#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
			"host"
#endif
			", debug=%d [eps=%d]\n",
		debug,
		musb->nr_endpoints);
	if (code <= 0)
		goto done;
	count += code;
	buffer += code;

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	code = sprintf(buffer, "Peripheral address: %02x\n",
			musb_readb(musb->ctrl_base, MUSB_FADDR));
	if (code <= 0)
		goto done;
	buffer += code;
	count += code;
#endif

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	code = sprintf(buffer, "Root port status: %08x\n",
			musb->port1_status);
	if (code <= 0)
		goto done;
	buffer += code;
	count += code;
#endif

#ifdef CONFIG_ARCH_DAVINCI
	/* DaVinci wrapper registers around the core */
	code = sprintf(buffer,
			"DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
			"\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
			"\n",
			musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
			musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
			__raw_readl((void __force __iomem *)
					IO_ADDRESS(USBPHY_CTL_PADDR)),
			musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
			musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
			musb_readl(musb->ctrl_base,
					DAVINCI_USB_INT_SOURCE_REG),
			musb_readl(musb->ctrl_base,
					DAVINCI_USB_INT_MASK_REG));
	if (code <= 0)
		goto done;
	count += code;
	buffer += code;
#endif	/* DAVINCI */

#ifdef CONFIG_USB_TUSB6010
	/* TUSB6010 wrapper registers around the core */
	code = sprintf(buffer,
			"TUSB6010: devconf %08x, phy enable %08x drive %08x"
			"\n\totg %03x timer %08x"
			"\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
			"\n",
			musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
			musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
			musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
			musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
			musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
			musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
			musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
			musb_readl(musb->ctrl_base, TUSB_INT_SRC),
			musb_readl(musb->ctrl_base, TUSB_INT_MASK));
	if (code <= 0)
		goto done;
	count += code;
	buffer += code;
#endif	/* TUSB6010 */

	if (is_cppi_enabled() && musb->dma_controller) {
		code = sprintf(buffer,
				"CPPI: txcr=%d txsrc=%01x txena=%01x; "
				"rxcr=%d rxsrc=%01x rxena=%01x "
				"\n",
				musb_readl(musb->ctrl_base,
						DAVINCI_TXCPPI_CTRL_REG),
				musb_readl(musb->ctrl_base,
						DAVINCI_TXCPPI_RAW_REG),
				musb_readl(musb->ctrl_base,
						DAVINCI_TXCPPI_INTENAB_REG),
				musb_readl(musb->ctrl_base,
						DAVINCI_RXCPPI_CTRL_REG),
				musb_readl(musb->ctrl_base,
						DAVINCI_RXCPPI_RAW_REG),
				musb_readl(musb->ctrl_base,
						DAVINCI_RXCPPI_INTENAB_REG));
		if (code <= 0)
			goto done;
		count += code;
		buffer += code;
	}

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	if (is_peripheral_enabled(musb)) {
		code = sprintf(buffer, "Gadget driver: %s\n",
				musb->gadget_driver
					? musb->gadget_driver->driver.name
					: "(none)");
		if (code <= 0)
			goto done;
		count += code;
		buffer += code;
	}
#endif

done:
	return count;
}
610
611/* Write to ProcFS
612 *
613 * C soft-connect
614 * c soft-disconnect
615 * I enable HS
616 * i disable HS
617 * s stop session
618 * F force session (OTG-unfriendly)
619 * E rElinquish bus (OTG)
620 * H request host mode
621 * h cancel host request
622 * T start sending TEST_PACKET
623 * D<num> set/query the debug level
624 */
625static int musb_proc_write(struct file *file, const char __user *buffer,
626 unsigned long count, void *data)
627{
628 char cmd;
629 u8 reg;
630 struct musb *musb = (struct musb *)data;
631 void __iomem *mbase = musb->mregs;
632
633 /* MOD_INC_USE_COUNT; */
634
635 if (unlikely(copy_from_user(&cmd, buffer, 1)))
636 return -EFAULT;
637
638 switch (cmd) {
639 case 'C':
640 if (mbase) {
641 reg = musb_readb(mbase, MUSB_POWER)
642 | MUSB_POWER_SOFTCONN;
643 musb_writeb(mbase, MUSB_POWER, reg);
644 }
645 break;
646
647 case 'c':
648 if (mbase) {
649 reg = musb_readb(mbase, MUSB_POWER)
650 & ~MUSB_POWER_SOFTCONN;
651 musb_writeb(mbase, MUSB_POWER, reg);
652 }
653 break;
654
655 case 'I':
656 if (mbase) {
657 reg = musb_readb(mbase, MUSB_POWER)
658 | MUSB_POWER_HSENAB;
659 musb_writeb(mbase, MUSB_POWER, reg);
660 }
661 break;
662
663 case 'i':
664 if (mbase) {
665 reg = musb_readb(mbase, MUSB_POWER)
666 & ~MUSB_POWER_HSENAB;
667 musb_writeb(mbase, MUSB_POWER, reg);
668 }
669 break;
670
671 case 'F':
672 reg = musb_readb(mbase, MUSB_DEVCTL);
673 reg |= MUSB_DEVCTL_SESSION;
674 musb_writeb(mbase, MUSB_DEVCTL, reg);
675 break;
676
677 case 'H':
678 if (mbase) {
679 reg = musb_readb(mbase, MUSB_DEVCTL);
680 reg |= MUSB_DEVCTL_HR;
681 musb_writeb(mbase, MUSB_DEVCTL, reg);
682 /* MUSB_HST_MODE( ((struct musb*)data) ); */
683 /* WARNING("Host Mode\n"); */
684 }
685 break;
686
687 case 'h':
688 if (mbase) {
689 reg = musb_readb(mbase, MUSB_DEVCTL);
690 reg &= ~MUSB_DEVCTL_HR;
691 musb_writeb(mbase, MUSB_DEVCTL, reg);
692 }
693 break;
694
695 case 'T':
696 if (mbase) {
697 musb_load_testpacket(musb);
698 musb_writeb(mbase, MUSB_TESTMODE,
699 MUSB_TEST_PACKET);
700 }
701 break;
702
703#if (MUSB_DEBUG > 0)
704 /* set/read debug level */
705 case 'D':{
706 if (count > 1) {
707 char digits[8], *p = digits;
708 int i = 0, level = 0, sign = 1;
709 int len = min(count - 1, (unsigned long)8);
710
711 if (copy_from_user(&digits, &buffer[1], len))
712 return -EFAULT;
713
714 /* optional sign */
715 if (*p == '-') {
716 len -= 1;
717 sign = -sign;
718 p++;
719 }
720
721 /* read it */
722 while (i++ < len && *p > '0' && *p < '9') {
723 level = level * 10 + (*p - '0');
724 p++;
725 }
726
727 level *= sign;
728 DBG(1, "debug level %d\n", level);
729 debug = level;
730 }
731 }
732 break;
733
734
735 case '?':
736 INFO("?: you are seeing it\n");
737 INFO("C/c: soft connect enable/disable\n");
738 INFO("I/i: hispeed enable/disable\n");
739 INFO("F: force session start\n");
740 INFO("H: host mode\n");
741 INFO("T: start sending TEST_PACKET\n");
742 INFO("D: set/read dbug level\n");
743 break;
744#endif
745
746 default:
747 ERR("Command %c not implemented\n", cmd);
748 break;
749 }
750
751 musb_platform_try_idle(musb, 0);
752
753 return count;
754}
755
/* Read from ProcFS: header/status dump followed by per-endpoint detail.
 * Legacy read_proc interface -- 'off' only shrinks the byte budget (no
 * real seek support), and the whole dump must fit in the single page
 * the kernel hands us.  Returns the number of bytes produced.
 */
static int musb_proc_read(char *page, char **start,
			off_t off, int count, int *eof, void *data)
{
	char *buffer = page;
	int code = 0;
	unsigned long flags;
	struct musb *musb = data;
	unsigned epnum;

	count -= off;
	count -= 1;		/* for NUL at end */
	if (count <= 0)
		return -EINVAL;

	/* registers and queues are inspected under the driver lock */
	spin_lock_irqsave(&musb->lock, flags);

	code = dump_header_stats(musb, buffer);
	if (code > 0) {
		buffer += code;
		count -= code;
	}

	/* generate the report for the end points */
	/* REVISIT ... not unless something's connected! */
	for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints;
			epnum++) {
		code = dump_end_info(musb, epnum, buffer, count);
		if (code > 0) {
			buffer += code;
			count -= code;
		}
	}

	/* allow the hardware to idle again now that we're done */
	musb_platform_try_idle(musb, 0);

	spin_unlock_irqrestore(&musb->lock, flags);
	*eof = 1;		/* single-shot read; no partial reads */

	return buffer - page;
}
796
797void __devexit musb_debug_delete(char *name, struct musb *musb)
798{
799 if (musb->proc_entry)
800 remove_proc_entry(name, NULL);
801}
802
803struct proc_dir_entry *__init
804musb_debug_create(char *name, struct musb *data)
805{
806 struct proc_dir_entry *pde;
807
808 /* FIXME convert everything to seq_file; then later, debugfs */
809
810 if (!name)
811 return NULL;
812
813 pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
814 data->proc_entry = pde;
815 if (pde) {
816 pde->data = data;
817 /* pde->owner = THIS_MODULE; */
818
819 pde->read_proc = musb_proc_read;
820 pde->write_proc = musb_proc_write;
821
822 pde->size = 0;
823
824 pr_debug("Registered /proc/%s\n", name);
825 } else {
826 pr_debug("Cannot create a valid proc file entry");
827 }
828
829 return pde;
830}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 000000000000..9c228661aa5a
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
1/*
2 * MUSB OTG driver register defines
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#ifndef __MUSB_REGS_H__
36#define __MUSB_REGS_H__
37
38#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
39
40/*
41 * Common USB registers
42 */
43
44#define MUSB_FADDR 0x00 /* 8-bit */
45#define MUSB_POWER 0x01 /* 8-bit */
46
47#define MUSB_INTRTX 0x02 /* 16-bit */
48#define MUSB_INTRRX 0x04
49#define MUSB_INTRTXE 0x06
50#define MUSB_INTRRXE 0x08
51#define MUSB_INTRUSB 0x0A /* 8 bit */
52#define MUSB_INTRUSBE 0x0B /* 8 bit */
53#define MUSB_FRAME 0x0C
54#define MUSB_INDEX 0x0E /* 8 bit */
55#define MUSB_TESTMODE 0x0F /* 8 bit */
56
57/* Get offset for a given FIFO from musb->mregs */
58#ifdef CONFIG_USB_TUSB6010
59#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
60#else
61#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
62#endif
63
64/*
65 * Additional Control Registers
66 */
67
68#define MUSB_DEVCTL 0x60 /* 8 bit */
69
70/* These are always controlled through the INDEX register */
71#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
72#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
73#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
74#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
75
76/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
77#define MUSB_HWVERS 0x6C /* 8 bit */
78
79#define MUSB_EPINFO 0x78 /* 8 bit */
80#define MUSB_RAMINFO 0x79 /* 8 bit */
81#define MUSB_LINKINFO 0x7a /* 8 bit */
82#define MUSB_VPLEN 0x7b /* 8 bit */
83#define MUSB_HS_EOF1 0x7c /* 8 bit */
84#define MUSB_FS_EOF1 0x7d /* 8 bit */
85#define MUSB_LS_EOF1 0x7e /* 8 bit */
86
87/* Offsets to endpoint registers */
88#define MUSB_TXMAXP 0x00
89#define MUSB_TXCSR 0x02
90#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
91#define MUSB_RXMAXP 0x04
92#define MUSB_RXCSR 0x06
93#define MUSB_RXCOUNT 0x08
94#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
95#define MUSB_TXTYPE 0x0A
96#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
97#define MUSB_TXINTERVAL 0x0B
98#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
99#define MUSB_RXTYPE 0x0C
100#define MUSB_RXINTERVAL 0x0D
101#define MUSB_FIFOSIZE 0x0F
102#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
103
104/* Offsets to endpoint registers in indexed model (using INDEX register) */
105#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
106 (0x10 + (_offset))
107
108/* Offsets to endpoint registers in flat models */
109#define MUSB_FLAT_OFFSET(_epnum, _offset) \
110 (0x100 + (0x10*(_epnum)) + (_offset))
111
112#ifdef CONFIG_USB_TUSB6010
113/* TUSB6010 EP0 configuration register is special */
114#define MUSB_TUSB_OFFSET(_epnum, _offset) \
115 (0x10 + _offset)
116#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
117#endif
118
119/* "bus control"/target registers, for host side multipoint (external hubs) */
120#define MUSB_TXFUNCADDR 0x00
121#define MUSB_TXHUBADDR 0x02
122#define MUSB_TXHUBPORT 0x03
123
124#define MUSB_RXFUNCADDR 0x04
125#define MUSB_RXHUBADDR 0x06
126#define MUSB_RXHUBPORT 0x07
127
128#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
129 (0x80 + (8*(_epnum)) + (_offset))
130
131/*
132 * MUSB Register bits
133 */
134
135/* POWER */
136#define MUSB_POWER_ISOUPDATE 0x80
137#define MUSB_POWER_SOFTCONN 0x40
138#define MUSB_POWER_HSENAB 0x20
139#define MUSB_POWER_HSMODE 0x10
140#define MUSB_POWER_RESET 0x08
141#define MUSB_POWER_RESUME 0x04
142#define MUSB_POWER_SUSPENDM 0x02
143#define MUSB_POWER_ENSUSPEND 0x01
144
145/* INTRUSB */
146#define MUSB_INTR_SUSPEND 0x01
147#define MUSB_INTR_RESUME 0x02
148#define MUSB_INTR_RESET 0x04
149#define MUSB_INTR_BABBLE 0x04
150#define MUSB_INTR_SOF 0x08
151#define MUSB_INTR_CONNECT 0x10
152#define MUSB_INTR_DISCONNECT 0x20
153#define MUSB_INTR_SESSREQ 0x40
154#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
155
156/* DEVCTL */
157#define MUSB_DEVCTL_BDEVICE 0x80
158#define MUSB_DEVCTL_FSDEV 0x40
159#define MUSB_DEVCTL_LSDEV 0x20
160#define MUSB_DEVCTL_VBUS 0x18
161#define MUSB_DEVCTL_VBUS_SHIFT 3
162#define MUSB_DEVCTL_HM 0x04
163#define MUSB_DEVCTL_HR 0x02
164#define MUSB_DEVCTL_SESSION 0x01
165
166/* TESTMODE */
167#define MUSB_TEST_FORCE_HOST 0x80
168#define MUSB_TEST_FIFO_ACCESS 0x40
169#define MUSB_TEST_FORCE_FS 0x20
170#define MUSB_TEST_FORCE_HS 0x10
171#define MUSB_TEST_PACKET 0x08
172#define MUSB_TEST_K 0x04
173#define MUSB_TEST_J 0x02
174#define MUSB_TEST_SE0_NAK 0x01
175
176/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
177#define MUSB_FIFOSZ_DPB 0x10
178/* Allocation size (8, 16, 32, ... 4096) */
179#define MUSB_FIFOSZ_SIZE 0x0f
180
181/* CSR0 */
182#define MUSB_CSR0_FLUSHFIFO 0x0100
183#define MUSB_CSR0_TXPKTRDY 0x0002
184#define MUSB_CSR0_RXPKTRDY 0x0001
185
186/* CSR0 in Peripheral mode */
187#define MUSB_CSR0_P_SVDSETUPEND 0x0080
188#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
189#define MUSB_CSR0_P_SENDSTALL 0x0020
190#define MUSB_CSR0_P_SETUPEND 0x0010
191#define MUSB_CSR0_P_DATAEND 0x0008
192#define MUSB_CSR0_P_SENTSTALL 0x0004
193
194/* CSR0 in Host mode */
195#define MUSB_CSR0_H_DIS_PING 0x0800
196#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
197#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
198#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
199#define MUSB_CSR0_H_STATUSPKT 0x0040
200#define MUSB_CSR0_H_REQPKT 0x0020
201#define MUSB_CSR0_H_ERROR 0x0010
202#define MUSB_CSR0_H_SETUPPKT 0x0008
203#define MUSB_CSR0_H_RXSTALL 0x0004
204
205/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
206#define MUSB_CSR0_P_WZC_BITS \
207 (MUSB_CSR0_P_SENTSTALL)
208#define MUSB_CSR0_H_WZC_BITS \
209 (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
210 | MUSB_CSR0_RXPKTRDY)
211
212/* TxType/RxType */
213#define MUSB_TYPE_SPEED 0xc0
214#define MUSB_TYPE_SPEED_SHIFT 6
215#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
216#define MUSB_TYPE_PROTO_SHIFT 4
217#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
218
219/* CONFIGDATA */
220#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
221#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
222#define MUSB_CONFIGDATA_BIGENDIAN 0x20
223#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
224#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
225#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
226#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
227#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
228
229/* TXCSR in Peripheral and Host mode */
230#define MUSB_TXCSR_AUTOSET 0x8000
231#define MUSB_TXCSR_MODE 0x2000
232#define MUSB_TXCSR_DMAENAB 0x1000
233#define MUSB_TXCSR_FRCDATATOG 0x0800
234#define MUSB_TXCSR_DMAMODE 0x0400
235#define MUSB_TXCSR_CLRDATATOG 0x0040
236#define MUSB_TXCSR_FLUSHFIFO 0x0008
237#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
238#define MUSB_TXCSR_TXPKTRDY 0x0001
239
240/* TXCSR in Peripheral mode */
241#define MUSB_TXCSR_P_ISO 0x4000
242#define MUSB_TXCSR_P_INCOMPTX 0x0080
243#define MUSB_TXCSR_P_SENTSTALL 0x0020
244#define MUSB_TXCSR_P_SENDSTALL 0x0010
245#define MUSB_TXCSR_P_UNDERRUN 0x0004
246
247/* TXCSR in Host mode */
248#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
249#define MUSB_TXCSR_H_DATATOGGLE 0x0100
250#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
251#define MUSB_TXCSR_H_RXSTALL 0x0020
252#define MUSB_TXCSR_H_ERROR 0x0004
253
254/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
255#define MUSB_TXCSR_P_WZC_BITS \
256 (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
257 | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
258#define MUSB_TXCSR_H_WZC_BITS \
259 (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
260 | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
261
262/* RXCSR in Peripheral and Host mode */
263#define MUSB_RXCSR_AUTOCLEAR 0x8000
264#define MUSB_RXCSR_DMAENAB 0x2000
265#define MUSB_RXCSR_DISNYET 0x1000
266#define MUSB_RXCSR_PID_ERR 0x1000
267#define MUSB_RXCSR_DMAMODE 0x0800
268#define MUSB_RXCSR_INCOMPRX 0x0100
269#define MUSB_RXCSR_CLRDATATOG 0x0080
270#define MUSB_RXCSR_FLUSHFIFO 0x0010
271#define MUSB_RXCSR_DATAERROR 0x0008
272#define MUSB_RXCSR_FIFOFULL 0x0002
273#define MUSB_RXCSR_RXPKTRDY 0x0001
274
275/* RXCSR in Peripheral mode */
276#define MUSB_RXCSR_P_ISO 0x4000
277#define MUSB_RXCSR_P_SENTSTALL 0x0040
278#define MUSB_RXCSR_P_SENDSTALL 0x0020
279#define MUSB_RXCSR_P_OVERRUN 0x0004
280
281/* RXCSR in Host mode */
282#define MUSB_RXCSR_H_AUTOREQ 0x4000
283#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
284#define MUSB_RXCSR_H_DATATOGGLE 0x0200
285#define MUSB_RXCSR_H_RXSTALL 0x0040
286#define MUSB_RXCSR_H_REQPKT 0x0020
287#define MUSB_RXCSR_H_ERROR 0x0004
288
289/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
290#define MUSB_RXCSR_P_WZC_BITS \
291 (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
292 | MUSB_RXCSR_RXPKTRDY)
293#define MUSB_RXCSR_H_WZC_BITS \
294 (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
295 | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
296
297/* HUBADDR */
298#define MUSB_HUBADDR_MULTI_TT 0x80
299
300#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 000000000000..e0e9ce584175
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
1/*
2 * MUSB OTG driver virtual root hub support
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/errno.h>
40#include <linux/init.h>
41#include <linux/time.h>
42#include <linux/timer.h>
43
44#include <asm/unaligned.h>
45
46#include "musb_core.h"
47
48
/*
 * musb_port_suspend - drive suspend or resume signaling on the single
 * root port by toggling SUSPENDM/RESUME in the POWER register, then
 * update port1_status and the OTG state machine to match.
 *
 * @do_suspend: true to suspend the port, false to begin resume
 * (GetPortStatus later terminates the 20 ms resume signaling).
 * Caller is expected to hold musb->lock.
 */
49static void musb_port_suspend(struct musb *musb, bool do_suspend)
50{
51	u8 power;
52	void __iomem *mbase = musb->mregs;
53
54	if (!is_host_active(musb))
55		return;
56
57	/* NOTE: this doesn't necessarily put PHY into low power mode,
58	 * turning off its clock; that's a function of PHY integration and
59	 * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
60	 * SE0 changing to connect (J) or wakeup (K) states.
61	 */
62	power = musb_readb(mbase, MUSB_POWER);
63	if (do_suspend) {
64		int retries = 10000;
65
66		power &= ~MUSB_POWER_RESUME;
67		power |= MUSB_POWER_SUSPENDM;
68		musb_writeb(mbase, MUSB_POWER, power);
69
		/* bounded busy-wait for the core to clear SUSPENDM */
70		/* Needed for OPT A tests */
71		power = musb_readb(mbase, MUSB_POWER);
72		while (power & MUSB_POWER_SUSPENDM) {
73			power = musb_readb(mbase, MUSB_POWER);
74			if (retries-- < 1)
75				break;
76		}
77
78		DBG(3, "Root port suspended, power %02x\n", power);
79
80		musb->port1_status |= USB_PORT_STAT_SUSPEND;
81		switch (musb->xceiv.state) {
82		case OTG_STATE_A_HOST:
83			musb->xceiv.state = OTG_STATE_A_SUSPEND;
			/* stay "active" only if the peer may HNP to host */
84			musb->is_active = is_otg_enabled(musb)
85					&& musb->xceiv.host->b_hnp_enable;
86			musb_platform_try_idle(musb, 0);
87			break;
88#ifdef CONFIG_USB_MUSB_OTG
89		case OTG_STATE_B_HOST:
90			musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
91			musb->is_active = is_otg_enabled(musb)
92					&& musb->xceiv.host->b_hnp_enable;
93			musb_platform_try_idle(musb, 0);
94			break;
95#endif
96		default:
97			DBG(1, "bogus rh suspend? %s\n",
98				otg_state_string(musb));
99		}
100	} else if (power & MUSB_POWER_SUSPENDM) {
101		power &= ~MUSB_POWER_SUSPENDM;
102		power |= MUSB_POWER_RESUME;
103		musb_writeb(mbase, MUSB_POWER, power);
104
105		DBG(3, "Root port resuming, power %02x\n", power);
106
107		/* later, GetPortStatus will stop RESUME signaling */
108		musb->port1_status |= MUSB_PORT_STAT_RESUME;
109		musb->rh_timer = jiffies + msecs_to_jiffies(20);
110	}
111}
112
/*
 * musb_port_reset - start or finish USB reset signaling on the root port.
 *
 * @do_reset: true to assert RESET (caller stops it after rh_timer, see
 * GetPortStatus handling); false to deassert it, sample the negotiated
 * speed and report the reset/enable changes to the hub layer.
 * Caller is expected to hold musb->lock.
 */
113static void musb_port_reset(struct musb *musb, bool do_reset)
114{
115	u8 power;
116	void __iomem *mbase = musb->mregs;
117
118#ifdef CONFIG_USB_MUSB_OTG
119	if (musb->xceiv.state == OTG_STATE_B_IDLE) {
120		DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
121		musb->port1_status &= ~USB_PORT_STAT_RESET;
122		return;
123	}
124#endif
125
126	if (!is_host_active(musb))
127		return;
128
129	/* NOTE: caller guarantees it will turn off the reset when
130	 * the appropriate amount of time has passed
131	 */
132	power = musb_readb(mbase, MUSB_POWER);
133	if (do_reset) {
134
135		/*
136		 * If RESUME is set, we must make sure it stays minimum 20 ms.
137		 * Then we must clear RESUME and wait a bit to let musb start
138		 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
139		 * fail with "Error! Did not receive an SOF before suspend
140		 * detected".
141		 */
142		if (power & MUSB_POWER_RESUME) {
143			while (time_before(jiffies, musb->rh_timer))
144				msleep(1);
145			musb_writeb(mbase, MUSB_POWER,
146				power & ~MUSB_POWER_RESUME);
147			msleep(1);
148		}
149
		/* reset toggles the line; suppress the bogus disconnect IRQ */
150		musb->ignore_disconnect = true;
151		power &= 0xf0;
152		musb_writeb(mbase, MUSB_POWER,
153			power | MUSB_POWER_RESET);
154
155		musb->port1_status |= USB_PORT_STAT_RESET;
156		musb->port1_status &= ~USB_PORT_STAT_ENABLE;
157		musb->rh_timer = jiffies + msecs_to_jiffies(50);
158	} else {
159		DBG(4, "root port reset stopped\n");
160		musb_writeb(mbase, MUSB_POWER,
161			power & ~MUSB_POWER_RESET);
162
163		musb->ignore_disconnect = false;
164
		/* HSMODE reflects the speed negotiated during reset */
165		power = musb_readb(mbase, MUSB_POWER);
166		if (power & MUSB_POWER_HSMODE) {
167			DBG(4, "high-speed device connected\n");
168			musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
169		}
170
171		musb->port1_status &= ~USB_PORT_STAT_RESET;
172		musb->port1_status |= USB_PORT_STAT_ENABLE
173					| (USB_PORT_STAT_C_RESET << 16)
174					| (USB_PORT_STAT_C_ENABLE << 16);
175		usb_hcd_poll_rh_status(musb_to_hcd(musb));
176
177		musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
178	}
179}
180
/*
 * musb_root_disconnect - handle disconnect on the (only) root port:
 * report the connection change to the hub layer and step the OTG state
 * machine out of host mode.
 */
181void musb_root_disconnect(struct musb *musb)
182{
	/* port stays powered; only the connection-change bit is raised */
183	musb->port1_status = (1 << USB_PORT_FEAT_POWER)
184			| (1 << USB_PORT_FEAT_C_CONNECTION);
185
186	usb_hcd_poll_rh_status(musb_to_hcd(musb));
187	musb->is_active = 0;
188
189	switch (musb->xceiv.state) {
190	case OTG_STATE_A_HOST:
191	case OTG_STATE_A_SUSPEND:
192		musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
193		musb->is_active = 0;
194		break;
195	case OTG_STATE_A_WAIT_VFALL:
196		musb->xceiv.state = OTG_STATE_B_IDLE;
197		break;
198	default:
199		DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
200	}
201}
202
203
204/*---------------------------------------------------------------------*/
205
206/* Caller may or may not hold musb->lock */
207int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
208{
209 struct musb *musb = hcd_to_musb(hcd);
210 int retval = 0;
211
212 /* called in_irq() via usb_hcd_poll_rh_status() */
213 if (musb->port1_status & 0xffff0000) {
214 *buf = 0x02;
215 retval = 1;
216 }
217 return retval;
218}
219
/*
 * musb_hub_control - hub-class control requests for the virtual root hub.
 *
 * Implements Clear/Set{Hub,Port}Feature, GetHubDescriptor, GetHubStatus
 * and GetPortStatus for the single root port (wIndex must be 1).
 * Unsupported requests fall through to the error label and return
 * -EPIPE, which the HCD core turns into a protocol stall.
 */
220int musb_hub_control(
221	struct usb_hcd *hcd,
222	u16 typeReq,
223	u16 wValue,
224	u16 wIndex,
225	char *buf,
226	u16 wLength)
227{
228	struct musb *musb = hcd_to_musb(hcd);
229	u32 temp;
230	int retval = 0;
231	unsigned long flags;
232
233	spin_lock_irqsave(&musb->lock, flags);
234
235	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
236		spin_unlock_irqrestore(&musb->lock, flags);
237		return -ESHUTDOWN;
238	}
239
240	/* hub features: always zero, setting is a NOP
241	 * port features: reported, sometimes updated when host is active
242	 * no indicators
243	 */
244	switch (typeReq) {
245	case ClearHubFeature:
246	case SetHubFeature:
247		switch (wValue) {
248		case C_HUB_OVER_CURRENT:
249		case C_HUB_LOCAL_POWER:
250			break;
251		default:
252			goto error;
253		}
254		break;
255	case ClearPortFeature:
256		if ((wIndex & 0xff) != 1)
257			goto error;
258
259		switch (wValue) {
260		case USB_PORT_FEAT_ENABLE:
261			break;
262		case USB_PORT_FEAT_SUSPEND:
263			musb_port_suspend(musb, false);
264			break;
265		case USB_PORT_FEAT_POWER:
			/* b_host mode: VBUS comes from the other side */
266			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
267				musb_set_vbus(musb, 0);
268			break;
269		case USB_PORT_FEAT_C_CONNECTION:
270		case USB_PORT_FEAT_C_ENABLE:
271		case USB_PORT_FEAT_C_OVER_CURRENT:
272		case USB_PORT_FEAT_C_RESET:
273		case USB_PORT_FEAT_C_SUSPEND:
274			break;
275		default:
276			goto error;
277		}
278		DBG(5, "clear feature %d\n", wValue);
279		musb->port1_status &= ~(1 << wValue);
280		break;
281	case GetHubDescriptor:
282		{
283		struct usb_hub_descriptor *desc = (void *)buf;
284
285		desc->bDescLength = 9;
286		desc->bDescriptorType = 0x29;
287		desc->bNbrPorts = 1;
288		desc->wHubCharacteristics = __constant_cpu_to_le16(
289			0x0001	/* per-port power switching */
290			| 0x0010	/* no overcurrent reporting */
291			);
292		desc->bPwrOn2PwrGood = 5;	/* msec/2 */
293		desc->bHubContrCurrent = 0;
294
295		/* workaround bogus struct definition */
296		desc->DeviceRemovable[0] = 0x02;	/* port 1 */
297		desc->DeviceRemovable[1] = 0xff;
298		}
299		break;
300	case GetHubStatus:
301		temp = 0;
302		*(__le32 *) buf = cpu_to_le32(temp);
303		break;
304	case GetPortStatus:
305		if (wIndex != 1)
306			goto error;
307
308		/* finish RESET signaling? */
309		if ((musb->port1_status & USB_PORT_STAT_RESET)
310				&& time_after_eq(jiffies, musb->rh_timer))
311			musb_port_reset(musb, false);
312
313		/* finish RESUME signaling? */
314		if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
315				&& time_after_eq(jiffies, musb->rh_timer)) {
316			u8 power;
317
318			power = musb_readb(musb->mregs, MUSB_POWER);
319			power &= ~MUSB_POWER_RESUME;
320			DBG(4, "root port resume stopped, power %02x\n",
321					power);
322			musb_writeb(musb->mregs, MUSB_POWER, power);
323
324			/* ISSUE: DaVinci (RTL 1.300) disconnects after
325			 * resume of high speed peripherals (but not full
326			 * speed ones).
327			 */
328
329			musb->is_active = 1;
330			musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
331					| MUSB_PORT_STAT_RESUME);
332			musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
333			usb_hcd_poll_rh_status(musb_to_hcd(musb));
334			/* NOTE: it might really be A_WAIT_BCON ... */
335			musb->xceiv.state = OTG_STATE_A_HOST;
336		}
337
		/* buf may be unaligned; MUSB_PORT_STAT_RESUME is internal */
338		put_unaligned(cpu_to_le32(musb->port1_status
339					& ~MUSB_PORT_STAT_RESUME),
340				(__le32 *) buf);
341
342		/* port change status is more interesting */
343		DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
344				musb->port1_status);
345		break;
346	case SetPortFeature:
347		if ((wIndex & 0xff) != 1)
348			goto error;
349
350		switch (wValue) {
351		case USB_PORT_FEAT_POWER:
352			/* NOTE: this controller has a strange state machine
353			 * that involves "requesting sessions" according to
354			 * magic side effects from incompletely-described
355			 * rules about startup...
356			 *
357			 * This call is what really starts the host mode; be
358			 * very careful about side effects if you reorder any
359			 * initialization logic, e.g. for OTG, or change any
360			 * logic relating to VBUS power-up.
361			 */
362			if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
363				musb_start(musb);
364			break;
365		case USB_PORT_FEAT_RESET:
366			musb_port_reset(musb, true);
367			break;
368		case USB_PORT_FEAT_SUSPEND:
369			musb_port_suspend(musb, true);
370			break;
371		case USB_PORT_FEAT_TEST:
372			if (unlikely(is_host_active(musb)))
373				goto error;
374
			/* test selector travels in the high byte of wIndex */
375			wIndex >>= 8;
376			switch (wIndex) {
377			case 1:
378				pr_debug("TEST_J\n");
379				temp = MUSB_TEST_J;
380				break;
381			case 2:
382				pr_debug("TEST_K\n");
383				temp = MUSB_TEST_K;
384				break;
385			case 3:
386				pr_debug("TEST_SE0_NAK\n");
387				temp = MUSB_TEST_SE0_NAK;
388				break;
389			case 4:
390				pr_debug("TEST_PACKET\n");
391				temp = MUSB_TEST_PACKET;
392				musb_load_testpacket(musb);
393				break;
394			case 5:
395				pr_debug("TEST_FORCE_ENABLE\n");
396				temp = MUSB_TEST_FORCE_HOST
397					| MUSB_TEST_FORCE_HS;
398
399				musb_writeb(musb->mregs, MUSB_DEVCTL,
400						MUSB_DEVCTL_SESSION);
401				break;
402			case 6:
403				pr_debug("TEST_FIFO_ACCESS\n");
404				temp = MUSB_TEST_FIFO_ACCESS;
405				break;
406			default:
407				goto error;
408			}
409			musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
410			break;
411		default:
412			goto error;
413		}
414		DBG(5, "set feature %d\n", wValue);
415		musb->port1_status |= 1 << wValue;
416		break;
417
418	default:
419error:
420		/* "protocol stall" on error */
421		retval = -EPIPE;
422	}
423	spin_unlock_irqrestore(&musb->lock, flags);
424	return retval;
425}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 000000000000..9ba8fb7fcd24
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
1/*
2 * MUSB OTG driver - support for Mentor's DMA controller
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2007 by Texas Instruments
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
24 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
27 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33#include <linux/device.h>
34#include <linux/interrupt.h>
35#include <linux/platform_device.h>
36#include "musb_core.h"
37
38#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
39#include "omap2430.h"
40#endif
41
42#define MUSB_HSDMA_BASE 0x200
43#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
44#define MUSB_HSDMA_CONTROL 0x4
45#define MUSB_HSDMA_ADDRESS 0x8
46#define MUSB_HSDMA_COUNT 0xc
47
48#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \
49 (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset)
50
51/* control register (16-bit): */
52#define MUSB_HSDMA_ENABLE_SHIFT 0
53#define MUSB_HSDMA_TRANSMIT_SHIFT 1
54#define MUSB_HSDMA_MODE1_SHIFT 2
55#define MUSB_HSDMA_IRQENABLE_SHIFT 3
56#define MUSB_HSDMA_ENDPOINT_SHIFT 4
57#define MUSB_HSDMA_BUSERROR_SHIFT 8
58#define MUSB_HSDMA_BURSTMODE_SHIFT 9
59#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
60#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
61#define MUSB_HSDMA_BURSTMODE_INCR4 1
62#define MUSB_HSDMA_BURSTMODE_INCR8 2
63#define MUSB_HSDMA_BURSTMODE_INCR16 3
64
65#define MUSB_HSDMA_CHANNELS 8
66
67struct musb_dma_controller;

/* per-channel bookkeeping wrapped around the generic dma_channel */
69struct musb_dma_channel {
70	struct dma_channel Channel;	/* generic view handed to core */
71	struct musb_dma_controller *controller;	/* owning controller */
72	u32 dwStartAddress;	/* DMA start address of current transfer */
73	u32 len;	/* programmed transfer length, bytes */
74	u16 wMaxPacketSize;	/* endpoint packet size for this transfer */
75	u8 bIndex;	/* hardware channel number (0..7) */
76	u8 epnum;	/* endpoint this channel is bound to */
77	u8 transmit;	/* nonzero => Tx direction */
78};

80struct musb_dma_controller {
81	struct dma_controller Controller;	/* generic controller view */
82	struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
83	void *pDmaPrivate;	/* back-pointer to struct musb */
84	void __iomem *pCoreBase;	/* mapped MUSB register base */
85	u8 bChannelCount;
86	u8 bmUsedChannels;	/* bitmask: bit N set => channel N in use */
87	u8 irq;
88};
89
/* Mentor's DMA engine needs no setup before first use; always succeeds. */
static int dma_controller_start(struct dma_controller *c)
{
	return 0;	/* nothing to do */
}
95
96static void dma_channel_release(struct dma_channel *pChannel);
97
/*
 * dma_controller_stop - tear down the DMA controller; if any channels
 * are still marked busy (a driver bug), complain and force-release them.
 * Always returns 0.
 */
98static int dma_controller_stop(struct dma_controller *c)
99{
100	struct musb_dma_controller *controller =
101		container_of(c, struct musb_dma_controller, Controller);
102	struct musb *musb = (struct musb *) controller->pDmaPrivate;
103	struct dma_channel *pChannel;
104	u8 bBit;
105
106	if (controller->bmUsedChannels != 0) {
107		dev_err(musb->controller,
108			"Stopping DMA controller while channel active\n");
109
110		for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
111			if (controller->bmUsedChannels & (1 << bBit)) {
112				pChannel = &controller->aChannel[bBit].Channel;
113				dma_channel_release(pChannel);
114
				/* release clears the bit; stop early once
				 * the mask is empty
				 */
115				if (!controller->bmUsedChannels)
116					break;
117			}
118		}
119	}
120	return 0;
121}
122
123static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
124 struct musb_hw_ep *hw_ep, u8 transmit)
125{
126 u8 bBit;
127 struct dma_channel *pChannel = NULL;
128 struct musb_dma_channel *pImplChannel = NULL;
129 struct musb_dma_controller *controller =
130 container_of(c, struct musb_dma_controller, Controller);
131
132 for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
133 if (!(controller->bmUsedChannels & (1 << bBit))) {
134 controller->bmUsedChannels |= (1 << bBit);
135 pImplChannel = &(controller->aChannel[bBit]);
136 pImplChannel->controller = controller;
137 pImplChannel->bIndex = bBit;
138 pImplChannel->epnum = hw_ep->epnum;
139 pImplChannel->transmit = transmit;
140 pChannel = &(pImplChannel->Channel);
141 pChannel->private_data = pImplChannel;
142 pChannel->status = MUSB_DMA_STATUS_FREE;
143 pChannel->max_len = 0x10000;
144 /* Tx => mode 1; Rx => mode 0 */
145 pChannel->desired_mode = transmit;
146 pChannel->actual_len = 0;
147 break;
148 }
149 }
150 return pChannel;
151}
152
153static void dma_channel_release(struct dma_channel *pChannel)
154{
155 struct musb_dma_channel *pImplChannel =
156 (struct musb_dma_channel *) pChannel->private_data;
157
158 pChannel->actual_len = 0;
159 pImplChannel->dwStartAddress = 0;
160 pImplChannel->len = 0;
161
162 pImplChannel->controller->bmUsedChannels &=
163 ~(1 << pImplChannel->bIndex);
164
165 pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
166}
167
/*
 * configure_channel - program one hardware DMA channel's address, count
 * and control registers; writing the control word last is what starts
 * the transfer.
 *
 * @mode: nonzero selects DMA mode 1 (multi-packet, burst-capable);
 * zero selects mode 0.
 */
168static void configure_channel(struct dma_channel *pChannel,
169				u16 packet_sz, u8 mode,
170				dma_addr_t dma_addr, u32 len)
171{
172	struct musb_dma_channel *pImplChannel =
173		(struct musb_dma_channel *) pChannel->private_data;
174	struct musb_dma_controller *controller = pImplChannel->controller;
175	void __iomem *mbase = controller->pCoreBase;
176	u8 bChannel = pImplChannel->bIndex;
177	u16 csr = 0;
178
179	DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
180			pChannel, packet_sz, dma_addr, len, mode);
181
182	if (mode) {
183		csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
184		BUG_ON(len < packet_sz);
185
		/* pick the widest burst the packet size allows */
186		if (packet_sz >= 64) {
187			csr |= MUSB_HSDMA_BURSTMODE_INCR16
188				<< MUSB_HSDMA_BURSTMODE_SHIFT;
189		} else if (packet_sz >= 32) {
190			csr |= MUSB_HSDMA_BURSTMODE_INCR8
191				<< MUSB_HSDMA_BURSTMODE_SHIFT;
192		} else if (packet_sz >= 16) {
193			csr |= MUSB_HSDMA_BURSTMODE_INCR4
194				<< MUSB_HSDMA_BURSTMODE_SHIFT;
195		}
196	}
197
198	csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
199		| (1 << MUSB_HSDMA_ENABLE_SHIFT)
200		| (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
201		| (pImplChannel->transmit
202				? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
203				: 0);
204
205	/* address/count */
206	musb_writel(mbase,
207		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
208		dma_addr);
209	musb_writel(mbase,
210		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
211		len);
212
213	/* control (this should start things) */
214	musb_writew(mbase,
215		MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
216		csr);
217}
218
/*
 * dma_channel_program - record the transfer parameters on the channel,
 * mark it busy and kick off the hardware via configure_channel().
 *
 * Mode 1 is only honored when the transfer spans at least one full
 * packet; otherwise the request is demoted to mode 0.
 * Returns true (the generic DMA API's "programmed" indication).
 */
219static int dma_channel_program(struct dma_channel *pChannel,
220				u16 packet_sz, u8 mode,
221				dma_addr_t dma_addr, u32 len)
222{
223	struct musb_dma_channel *pImplChannel =
224			(struct musb_dma_channel *) pChannel->private_data;
225
226	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
227		pImplChannel->epnum,
228		pImplChannel->transmit ? "Tx" : "Rx",
229		packet_sz, dma_addr, len, mode);
230
	/* programming a released or already-busy channel is a driver bug */
231	BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
232		pChannel->status == MUSB_DMA_STATUS_BUSY);
233
234	pChannel->actual_len = 0;
235	pImplChannel->dwStartAddress = dma_addr;
236	pImplChannel->len = len;
237	pImplChannel->wMaxPacketSize = packet_sz;
238	pChannel->status = MUSB_DMA_STATUS_BUSY;
239
240	if ((mode == 1) && (len >= packet_sz))
241		configure_channel(pChannel, packet_sz, 1, dma_addr, len);
242	else
243		configure_channel(pChannel, packet_sz, 0, dma_addr, len);
244
245	return true;
246}
247
/*
 * dma_channel_abort - stop an in-flight transfer: detach DMA from the
 * endpoint's CSR first, then zero the channel's control/address/count
 * registers and mark the channel free again.  No-op if the channel is
 * not busy.  Always returns 0.
 */
248static int dma_channel_abort(struct dma_channel *pChannel)
249{
250	struct musb_dma_channel *pImplChannel =
251		(struct musb_dma_channel *) pChannel->private_data;
252	u8 bChannel = pImplChannel->bIndex;
253	void __iomem *mbase = pImplChannel->controller->pCoreBase;
254	u16 csr;
255
256	if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
257		if (pImplChannel->transmit) {
258
			/* clear the endpoint's DMA enables before the
			 * channel registers, so it stops requesting
			 */
259			csr = musb_readw(mbase,
260				MUSB_EP_OFFSET(pImplChannel->epnum,
261						MUSB_TXCSR));
262			csr &= ~(MUSB_TXCSR_AUTOSET |
263				 MUSB_TXCSR_DMAENAB |
264				 MUSB_TXCSR_DMAMODE);
265			musb_writew(mbase,
266				MUSB_EP_OFFSET(pImplChannel->epnum,
267						MUSB_TXCSR),
268				csr);
269		} else {
270			csr = musb_readw(mbase,
271				MUSB_EP_OFFSET(pImplChannel->epnum,
272						MUSB_RXCSR));
273			csr &= ~(MUSB_RXCSR_AUTOCLEAR |
274				 MUSB_RXCSR_DMAENAB |
275				 MUSB_RXCSR_DMAMODE);
276			musb_writew(mbase,
277				MUSB_EP_OFFSET(pImplChannel->epnum,
278						MUSB_RXCSR),
279				csr);
280		}
281
282		musb_writew(mbase,
283			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
284			0);
285		musb_writel(mbase,
286			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
287			0);
288		musb_writel(mbase,
289			MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
290			0);
291
292		pChannel->status = MUSB_DMA_STATUS_FREE;
293	}
294	return 0;
295}
296
/*
 * DMA completion interrupt handler.
 *
 * Reads the HSDMA interrupt register once, then services every channel
 * whose bit is set: on bus error the channel is flagged BUS_ABORT; on
 * normal completion the transferred length is derived from the current
 * DMA address minus the recorded start address.  In host TX mode, a
 * short/mode-0 completion still needs TXPKTRDY set by hand to push the
 * final packet onto the bus; otherwise completion is reported upward
 * via musb_dma_completion().
 */
static irqreturn_t dma_controller_irq(int irq, void *private_data)
{
	struct musb_dma_controller *controller =
		(struct musb_dma_controller *)private_data;
	struct musb_dma_channel *pImplChannel;
	struct musb *musb = controller->pDmaPrivate;
	void __iomem *mbase = controller->pCoreBase;
	struct dma_channel *pChannel;
	u8 bChannel;
	u16 csr;
	u32 dwAddress;
	u8 int_hsdma;
	irqreturn_t retval = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);

	/* Reading MUSB_HSDMA_INTR acknowledges the pending bits */
	int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
	if (!int_hsdma)
		goto done;

	for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
		if (int_hsdma & (1 << bChannel)) {
			pImplChannel = (struct musb_dma_channel *)
					&(controller->aChannel[bChannel]);
			pChannel = &pImplChannel->Channel;

			csr = musb_readw(mbase,
					MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
							MUSB_HSDMA_CONTROL));

			if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
				pImplChannel->Channel.status =
					MUSB_DMA_STATUS_BUS_ABORT;
			else {
				u8 devctl;

				/* actual_len = current DMA address - start */
				dwAddress = musb_readl(mbase,
						MUSB_HSDMA_CHANNEL_OFFSET(
							bChannel,
							MUSB_HSDMA_ADDRESS));
				pChannel->actual_len = dwAddress
					- pImplChannel->dwStartAddress;

				DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
					pChannel, pImplChannel->dwStartAddress,
					dwAddress, pChannel->actual_len,
					pImplChannel->len,
					(pChannel->actual_len
						< pImplChannel->len) ?
					"=> reconfig 0" : "=> complete");

				devctl = musb_readb(mbase, MUSB_DEVCTL);

				pChannel->status = MUSB_DMA_STATUS_FREE;

				/* completed */
				if ((devctl & MUSB_DEVCTL_HM)
					&& (pImplChannel->transmit)
					&& ((pChannel->desired_mode == 0)
					    || (pChannel->actual_len &
					    (pImplChannel->wMaxPacketSize - 1)))
					 ) {
					/* Send out the packet: in host TX
					 * with a short or mode-0 transfer the
					 * core does not set TXPKTRDY itself.
					 */
					musb_ep_select(mbase,
						pImplChannel->epnum);
					musb_writew(mbase, MUSB_EP_OFFSET(
							pImplChannel->epnum,
							MUSB_TXCSR),
						MUSB_TXCSR_TXPKTRDY);
				} else
					musb_dma_completion(
						musb,
						pImplChannel->epnum,
						pImplChannel->transmit);
			}
		}
	}
	retval = IRQ_HANDLED;
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return retval;
}
380
381void dma_controller_destroy(struct dma_controller *c)
382{
383 struct musb_dma_controller *controller;
384
385 controller = container_of(c, struct musb_dma_controller, Controller);
386 if (!controller)
387 return;
388
389 if (controller->irq)
390 free_irq(controller->irq, c);
391
392 kfree(controller);
393}
394
395struct dma_controller *__init
396dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
397{
398 struct musb_dma_controller *controller;
399 struct device *dev = musb->controller;
400 struct platform_device *pdev = to_platform_device(dev);
401 int irq = platform_get_irq(pdev, 1);
402
403 if (irq == 0) {
404 dev_err(dev, "No DMA interrupt line!\n");
405 return NULL;
406 }
407
408 controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
409 if (!controller)
410 return NULL;
411
412 controller->bChannelCount = MUSB_HSDMA_CHANNELS;
413 controller->pDmaPrivate = musb;
414 controller->pCoreBase = pCoreBase;
415
416 controller->Controller.start = dma_controller_start;
417 controller->Controller.stop = dma_controller_stop;
418 controller->Controller.channel_alloc = dma_channel_allocate;
419 controller->Controller.channel_release = dma_channel_release;
420 controller->Controller.channel_program = dma_channel_program;
421 controller->Controller.channel_abort = dma_channel_abort;
422
423 if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
424 musb->controller->bus_id, &controller->Controller)) {
425 dev_err(dev, "request_irq %d failed!\n", irq);
426 dma_controller_destroy(&controller->Controller);
427 return NULL;
428 }
429
430 controller->irq = irq;
431
432 return &controller->Controller;
433}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 000000000000..298b22e6ad0d
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright (C) 2005-2007 by Texas Instruments
3 * Some code has been taken from tusb6010.c
4 * Copyrights for that are attributable to:
5 * Copyright (C) 2006 Nokia Corporation
6 * Jarkko Nikula <jarkko.nikula@nokia.com>
7 * Tony Lindgren <tony@atomide.com>
8 *
9 * This file is part of the Inventra Controller Driver for Linux.
10 *
11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software
14 * Foundation.
15 *
16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA
26 *
27 */
28#include <linux/module.h>
29#include <linux/kernel.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/init.h>
33#include <linux/list.h>
34#include <linux/clk.h>
35#include <linux/io.h>
36
37#include <asm/mach-types.h>
38#include <asm/arch/hardware.h>
39#include <asm/arch/mux.h>
40
41#include "musb_core.h"
42#include "omap2430.h"
43
#ifdef CONFIG_ARCH_OMAP3430
/* OMAP3430: treat the core as a rev-2 part (same as 2430 ES2.x) */
#define get_cpu_rev()	2
#endif

/* ms to wait for a B-device to connect before idling VBUS as A-host */
#define MUSB_TIMEOUT_A_WAIT_BCON	1100

/* one-shot idle timer, (re)armed by musb_platform_try_idle() */
static struct timer_list musb_idle_timer;
51
52static void musb_do_idle(unsigned long _musb)
53{
54 struct musb *musb = (void *)_musb;
55 unsigned long flags;
56 u8 power;
57 u8 devctl;
58
59 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
60
61 spin_lock_irqsave(&musb->lock, flags);
62
63 switch (musb->xceiv.state) {
64 case OTG_STATE_A_WAIT_BCON:
65 devctl &= ~MUSB_DEVCTL_SESSION;
66 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
67
68 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
69 if (devctl & MUSB_DEVCTL_BDEVICE) {
70 musb->xceiv.state = OTG_STATE_B_IDLE;
71 MUSB_DEV_MODE(musb);
72 } else {
73 musb->xceiv.state = OTG_STATE_A_IDLE;
74 MUSB_HST_MODE(musb);
75 }
76 break;
77#ifdef CONFIG_USB_MUSB_HDRC_HCD
78 case OTG_STATE_A_SUSPEND:
79 /* finish RESUME signaling? */
80 if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
81 power = musb_readb(musb->mregs, MUSB_POWER);
82 power &= ~MUSB_POWER_RESUME;
83 DBG(1, "root port resume stopped, power %02x\n", power);
84 musb_writeb(musb->mregs, MUSB_POWER, power);
85 musb->is_active = 1;
86 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
87 | MUSB_PORT_STAT_RESUME);
88 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
89 usb_hcd_poll_rh_status(musb_to_hcd(musb));
90 /* NOTE: it might really be A_WAIT_BCON ... */
91 musb->xceiv.state = OTG_STATE_A_HOST;
92 }
93 break;
94#endif
95#ifdef CONFIG_USB_MUSB_HDRC_HCD
96 case OTG_STATE_A_HOST:
97 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
98 if (devctl & MUSB_DEVCTL_BDEVICE)
99 musb->xceiv.state = OTG_STATE_B_IDLE;
100 else
101 musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
102#endif
103 default:
104 break;
105 }
106 spin_unlock_irqrestore(&musb->lock, flags);
107}
108
109
/*
 * (Re)arm the idle timer, honoring a longer pending deadline.
 *
 * @timeout: absolute jiffies deadline; 0 means "use a ~3 ms default".
 * If the controller is active (or we're A_WAIT_BCON with no VBUS
 * timeout configured) any pending timer is cancelled instead.  The
 * static last_timer remembers the most recently requested deadline so
 * a shorter request never overrides a longer pending one.
 */
void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
{
	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
	static unsigned long	last_timer;

	if (timeout == 0)
		timeout = default_timeout;

	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || ((musb->a_wait_bcon == 0)
			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
		del_timer(&musb_idle_timer);
		last_timer = jiffies;
		return;
	}

	if (time_after(last_timer, timeout)) {
		if (!timer_pending(&musb_idle_timer))
			last_timer = timeout;
		else {
			DBG(4, "Longer idle timer already pending, ignoring\n");
			return;
		}
	}
	last_timer = timeout;

	DBG(4, "%s inactive, for idle timer for %lu ms\n",
		otg_state_string(musb),
		(unsigned long)jiffies_to_msecs(timeout - jiffies));
	mod_timer(&musb_idle_timer, timeout);
}
142
/* No OMAP-specific work needed when the controller is enabled. */
void musb_platform_enable(struct musb *musb)
{
}
/* No OMAP-specific work needed when the controller is disabled. */
void musb_platform_disable(struct musb *musb)
{
}
/* Board-level VBUS supply hook; nothing to do on this platform. */
static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
{
}
152
/*
 * Drive (or drop) a VBUS session and update the OTG state machine to
 * match: A_WAIT_VRISE/host mode when turning on, B_IDLE/device mode
 * when turning off.
 */
static void omap_set_vbus(struct musb *musb, int is_on)
{
	u8		devctl;
	/* HDRC controls CPEN, but beware current surges during device
	 * connect.  They can trigger transient overcurrent conditions
	 * that must be ignored.
	 */

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	if (is_on) {
		musb->is_active = 1;
		musb->xceiv.default_a = 1;
		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
		devctl |= MUSB_DEVCTL_SESSION;

		MUSB_HST_MODE(musb);
	} else {
		musb->is_active = 0;

		/* NOTE:  we're skipping A_WAIT_VFALL -> A_IDLE and
		 * jumping right to B_IDLE...
		 */

		musb->xceiv.default_a = 0;
		musb->xceiv.state = OTG_STATE_B_IDLE;
		devctl &= ~MUSB_DEVCTL_SESSION;

		MUSB_DEV_MODE(musb);
	}
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	DBG(1, "VBUS %s, devctl %02x "
		/* otg %3x conf %08x prcm %08x */ "\n",
		otg_state_string(musb),
		musb_readb(musb->mregs, MUSB_DEVCTL));
}
/* Transceiver set_power hook: VBUS draw is not limited here. */
static int omap_set_power(struct otg_transceiver *x, unsigned mA)
{
	return 0;
}
194
195static int musb_platform_resume(struct musb *musb);
196
/*
 * Force the controller into host, peripheral, or OTG mode by starting
 * a session and (re)binding the transceiver's host/gadget side.
 * MUSB_OTG needs no rebinding; role follows the ID pin.
 */
void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
{
	u8	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	devctl |= MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	switch (musb_mode) {
	case MUSB_HOST:
		otg_set_host(&musb->xceiv, musb->xceiv.host);
		break;
	case MUSB_PERIPHERAL:
		otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
		break;
	case MUSB_OTG:
		break;
	}
}
215
/*
 * One-time platform setup: pin-mux the ULPI STP line (2430 only),
 * bring the module out of standby, configure smart-standby/smart-idle
 * in OTG_SYSCONFIG, select the 12-pin ULPI PHY interface, and install
 * the VBUS/power hooks plus the idle timer.  Returns 0.
 */
int __init musb_platform_init(struct musb *musb)
{
	u32 l;

#if defined(CONFIG_ARCH_OMAP2430)
	omap_cfg_reg(AE5_2430_USB0HS_STP);
#endif

	musb_platform_resume(musb);

	l = omap_readl(OTG_SYSCONFIG);
	l &= ~ENABLEWAKEUP;	/* disable wakeup */
	l &= ~NOSTDBY;		/* remove possible nostdby */
	l |= SMARTSTDBY;	/* enable smart standby */
	/* AUTOIDLE is cleared while the rest is configured, then
	 * re-enabled as the last SYSCONFIG bit below.
	 */
	l &= ~AUTOIDLE;		/* disable auto idle */
	l &= ~NOIDLE;		/* remove possible noidle */
	l |= SMARTIDLE;		/* enable smart idle */
	l |= AUTOIDLE;		/* enable auto idle */
	omap_writel(l, OTG_SYSCONFIG);

	l = omap_readl(OTG_INTERFSEL);
	l |= ULPI_12PIN;
	omap_writel(l, OTG_INTERFSEL);

	pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
			"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
			omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
			omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
			omap_readl(OTG_SIMENABLE));

	omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);

	if (is_host_enabled(musb))
		musb->board_set_vbus = omap_set_vbus;
	if (is_peripheral_enabled(musb))
		musb->xceiv.set_power = omap_set_power;
	musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;

	setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);

	return 0;
}
258
/*
 * Put the module into standby: force MSTANDBY, enable wakeup, suspend
 * the transceiver, and gate the interface clock.  No-op (returns 0)
 * when no clock was ever obtained.
 */
int musb_platform_suspend(struct musb *musb)
{
	u32 l;

	if (!musb->clock)
		return 0;

	/* in any role */
	l = omap_readl(OTG_FORCESTDBY);
	l |= ENABLEFORCE;	/* enable MSTANDBY */
	omap_writel(l, OTG_FORCESTDBY);

	l = omap_readl(OTG_SYSCONFIG);
	l |= ENABLEWAKEUP;	/* enable wakeup */
	omap_writel(l, OTG_SYSCONFIG);

	if (musb->xceiv.set_suspend)
		musb->xceiv.set_suspend(&musb->xceiv, 1);

	/* board may route the clock through set_clock(); else gate directly */
	if (musb->set_clock)
		musb->set_clock(musb->clock, 0);
	else
		clk_disable(musb->clock);

	return 0;
}
285
/*
 * Reverse of musb_platform_suspend(): resume the transceiver, ungate
 * the clock, then clear wakeup and forced-standby.  No-op (returns 0)
 * when no clock was ever obtained.
 */
static int musb_platform_resume(struct musb *musb)
{
	u32 l;

	if (!musb->clock)
		return 0;

	if (musb->xceiv.set_suspend)
		musb->xceiv.set_suspend(&musb->xceiv, 0);

	if (musb->set_clock)
		musb->set_clock(musb->clock, 1);
	else
		clk_enable(musb->clock);

	l = omap_readl(OTG_SYSCONFIG);
	l &= ~ENABLEWAKEUP;	/* disable wakeup */
	omap_writel(l, OTG_SYSCONFIG);

	l = omap_readl(OTG_FORCESTDBY);
	l &= ~ENABLEFORCE;	/* disable MSTANDBY */
	omap_writel(l, OTG_FORCESTDBY);

	return 0;
}
311
312
313int musb_platform_exit(struct musb *musb)
314{
315
316 omap_vbus_power(musb, 0 /*off*/, 1);
317
318 musb_platform_suspend(musb);
319
320 clk_put(musb->clock);
321 musb->clock = 0;
322
323 return 0;
324}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 000000000000..786a62071f72
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2005-2006 by Texas Instruments
3 *
4 * The Inventra Controller Driver for Linux is free software; you
5 * can redistribute it and/or modify it under the terms of the GNU
6 * General Public License version 2 as published by the Free Software
7 * Foundation.
8 */
9
#ifndef __MUSB_OMAP243X_H__
#define __MUSB_OMAP243X_H__

#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
#include <asm/arch/hardware.h>
#include <asm/arch/usb.h>

/*
 * OMAP2430-specific definitions
 */

#define MENTOR_BASE_OFFSET	0
#if	defined(CONFIG_ARCH_OMAP2430)
#define	OMAP_HSOTG_BASE		(OMAP243X_HS_BASE)
#elif	defined(CONFIG_ARCH_OMAP3430)
#define	OMAP_HSOTG_BASE		(OMAP34XX_HSUSB_OTG_BASE)
#endif
/* OTG wrapper registers sit 0x400 above the Mentor core base */
#define OMAP_HSOTG(offset)	(OMAP_HSOTG_BASE + 0x400 + (offset))
#define OTG_REVISION		OMAP_HSOTG(0x0)
#define OTG_SYSCONFIG		OMAP_HSOTG(0x4)
#	define	MIDLEMODE	12	/* bit position */
#	define	FORCESTDBY		(0 << MIDLEMODE)
#	define	NOSTDBY			(1 << MIDLEMODE)
#	define	SMARTSTDBY		(2 << MIDLEMODE)
#	define	SIDLEMODE		3	/* bit position */
#	define	FORCEIDLE		(0 << SIDLEMODE)
#	define	NOIDLE			(1 << SIDLEMODE)
#	define	SMARTIDLE		(2 << SIDLEMODE)
#	define	ENABLEWAKEUP		(1 << 2)
#	define	SOFTRST			(1 << 1)
#	define	AUTOIDLE		(1 << 0)
#define OTG_SYSSTATUS		OMAP_HSOTG(0x8)
#	define	RESETDONE		(1 << 0)
#define OTG_INTERFSEL		OMAP_HSOTG(0xc)
#	define	EXTCP			(1 << 2)
#	define	PHYSEL			0	/* bit position */
#	define	UTMI_8BIT		(0 << PHYSEL)
#	define	ULPI_12PIN		(1 << PHYSEL)
#	define	ULPI_8PIN		(2 << PHYSEL)
#define OTG_SIMENABLE		OMAP_HSOTG(0x10)
#	define	TM1			(1 << 0)
#define OTG_FORCESTDBY		OMAP_HSOTG(0x14)
#	define	ENABLEFORCE		(1 << 0)

#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */

#endif	/* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 000000000000..b73b036f3d77
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
1/*
2 * TUSB6010 USB 2.0 OTG Dual Role controller
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Jarkko Nikula <jarkko.nikula@nokia.com>
6 * Tony Lindgren <tony@atomide.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Notes:
13 * - Driver assumes that interface to external host (main CPU) is
14 * configured for NOR FLASH interface instead of VLYNQ serial
15 * interface.
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/init.h>
22#include <linux/usb.h>
23#include <linux/irq.h>
24#include <linux/platform_device.h>
25
26#include "musb_core.h"
27
static void tusb_source_power(struct musb *musb, int is_on);

/* Revision registers hold major in bits 7:4, minor in bits 3:0.
 * Arguments are fully parenthesized so expressions like (a | b) expand
 * correctly (the old form broke for operators binding looser than &).
 */
#define TUSB_REV_MAJOR(reg_val)		(((reg_val) >> 4) & 0xf)
#define TUSB_REV_MINOR(reg_val)		((reg_val) & 0xf)
32
33/*
34 * Checks the revision. We need to use the DMA register as 3.0 does not
35 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
36 */
/*
 * Checks the revision. We need to use the DMA register as 3.0 does not
 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
 *
 * For major revision 3 the die ID distinguishes 3.0 from 3.1, so the
 * minor bit is patched in from TUSB_DIDR1_HI.
 */
u8 tusb_get_revision(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;
	u32		die_id;
	u8		rev;

	rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
	if (TUSB_REV_MAJOR(rev) == 3) {
		die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
				TUSB_DIDR1_HI));
		if (die_id >= TUSB_DIDR1_HI_REV_31)
			rev |= 1;
	}

	return rev;
}
53
54static int __init tusb_print_revision(struct musb *musb)
55{
56 void __iomem *tbase = musb->ctrl_base;
57 u8 rev;
58
59 rev = tusb_get_revision(musb);
60
61 pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
62 "prcm",
63 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
64 TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
65 "int",
66 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
67 TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
68 "gpio",
69 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
70 TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
71 "dma",
72 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
73 TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
74 "dieid",
75 TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
76 "rev",
77 TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
78
79 return tusb_get_revision(musb);
80}
81
#define WBUS_QUIRK_MASK	(TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
				| TUSB_PHY_OTG_CTRL_TESTM0)

/*
 * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
 * Disables power detection in PHY for the duration of idle.
 *
 * The pre-quirk register values are stashed in static locals while the
 * quirk is active and restored (only if TESTM2 is still set) when it
 * is disabled.
 */
static void tusb_wbus_quirk(struct musb *musb, int enabled)
{
	void __iomem	*tbase = musb->ctrl_base;
	static u32	phy_otg_ctrl, phy_otg_ena;
	u32		tmp;

	if (enabled) {
		phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
		phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
				| phy_otg_ena | WBUS_QUIRK_MASK;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
		tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
		tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
		DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
	} else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
					& TUSB_PHY_OTG_CTRL_TESTM2) {
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
		tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
		musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
		DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
			musb_readl(tbase, TUSB_PHY_OTG_CTRL),
			musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
		phy_otg_ctrl = 0;
		phy_otg_ena = 0;
	}
}
120
121/*
122 * TUSB 6010 may use a parallel bus that doesn't support byte ops;
123 * so both loading and unloading FIFOs need explicit byte counts.
124 */
125
/*
 * Copy @len bytes from a possibly unaligned buffer into the 32-bit
 * FIFO.  Whole words first, then one final word for the 1-3 byte tail
 * (the word's upper bytes may be stale; the endpoint's XFR_SIZE keeps
 * the chip from using them).
 */
static inline void
tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
{
	u32		val;
	int		i;

	if (len > 4) {
		for (i = 0; i < (len >> 2); i++) {
			memcpy(&val, buf, 4);
			musb_writel(fifo, 0, val);
			buf += 4;
		}
		len %= 4;
	}
	if (len > 0) {
		/* Write the rest 1 - 3 bytes to FIFO */
		memcpy(&val, buf, len);
		musb_writel(fifo, 0, val);
	}
}
146
/*
 * Counterpart of tusb_fifo_write_unaligned(): drain @len bytes from
 * the 32-bit FIFO into a possibly unaligned buffer, whole words first
 * and then the 1-3 byte tail of a final word read.
 *
 * NOTE(review): @buf is declared __iomem but is memcpy'd into like
 * ordinary memory — looks like a stray annotation; confirm.
 */
static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
						void __iomem *buf, u16 len)
{
	u32		val;
	int		i;

	if (len > 4) {
		for (i = 0; i < (len >> 2); i++) {
			val = musb_readl(fifo, 0);
			memcpy(buf, &val, 4);
			buf += 4;
		}
		len %= 4;
	}
	if (len > 0) {
		/* Read the rest 1 - 3 bytes from FIFO */
		val = musb_readl(fifo, 0);
		memcpy(buf, &val, len);
	}
}
167
/*
 * Load @len bytes into a TX FIFO.  The transfer size must be
 * programmed into the endpoint's config register first, because the
 * TUSB bus interface does not support byte accesses.  Fast paths for
 * 32-bit- and 16-bit-aligned buffers; any unaligned head/tail goes
 * through tusb_fifo_write_unaligned().
 */
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
{
	void __iomem	*ep_conf = hw_ep->conf;
	void __iomem	*fifo = hw_ep->fifo;
	u8		epnum = hw_ep->epnum;

	prefetch(buf);

	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
			'T', epnum, fifo, len, buf);

	if (epnum)
		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(len));
	else
		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
			TUSB_EP0_CONFIG_XFR_SIZE(len));

	if (likely((0x01 & (unsigned long) buf) == 0)) {

		/* Best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) buf) == 0) {
			if (len >= 4) {
				writesl(fifo, buf, len >> 2);
				buf += (len & ~0x03);
				len &= 0x03;
			}
		} else {
			if (len >= 2) {
				u32 val;
				int i;

				/* Cannot use writesw, fifo is 32-bit */
				for (i = 0; i < (len >> 2); i++) {
					/* pack two halfwords per FIFO word */
					val = (u32)(*(u16 *)buf);
					buf += 2;
					val |= (*(u16 *)buf) << 16;
					buf += 2;
					musb_writel(fifo, 0, val);
				}
				len &= 0x03;
			}
		}
	}

	if (len > 0)
		tusb_fifo_write_unaligned(fifo, buf, len);
}
216
/*
 * Mirror of musb_write_fifo() for RX: program the endpoint's transfer
 * size, then drain the FIFO using the widest access the buffer
 * alignment permits, finishing odd bytes via
 * tusb_fifo_read_unaligned().
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
{
	void __iomem	*ep_conf = hw_ep->conf;
	void __iomem	*fifo = hw_ep->fifo;
	u8		epnum = hw_ep->epnum;

	DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
			'R', epnum, fifo, len, buf);

	if (epnum)
		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(len));
	else
		musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));

	if (likely((0x01 & (unsigned long) buf) == 0)) {

		/* Best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) buf) == 0) {
			if (len >= 4) {
				readsl(fifo, buf, len >> 2);
				buf += (len & ~0x03);
				len &= 0x03;
			}
		} else {
			if (len >= 2) {
				u32 val;
				int i;

				/* Cannot use readsw, fifo is 32-bit */
				for (i = 0; i < (len >> 2); i++) {
					/* unpack one FIFO word into two halfwords */
					val = musb_readl(fifo, 0);
					*(u16 *)buf = (u16)(val & 0xffff);
					buf += 2;
					*(u16 *)buf = (u16)(val >> 16);
					buf += 2;
				}
				len &= 0x03;
			}
		}
	}

	if (len > 0)
		tusb_fifo_read_unaligned(fifo, buf, len);
}
262
263#ifdef CONFIG_USB_GADGET_MUSB_HDRC
264
265/* This is used by gadget drivers, and OTG transceiver logic, allowing
266 * at most mA current to be drawn from VBUS during a Default-B session
267 * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
268 * mode), or low power Default-B sessions, something else supplies power.
269 * Caller must take care of locking.
270 */
/*
 * otg_transceiver.set_power hook: gate the TUSB's 1.5V/3.3V switched
 * supplies according to how much VBUS current we may draw.  Also keeps
 * the interface clock running whenever any current is negotiated.
 * Returns 0.
 */
static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
{
	struct musb	*musb = container_of(x, struct musb, xceiv);
	void __iomem	*tbase = musb->ctrl_base;
	u32		reg;

	/*
	 * Keep clock active when enabled. Note that this is not tied to
	 * drawing VBUS, as with OTG mA can be less than musb->min_power.
	 */
	if (musb->set_clock) {
		if (mA)
			musb->set_clock(musb->clock, 1);
		else
			musb->set_clock(musb->clock, 0);
	}

	/* tps65030 seems to consume max 100mA, with maybe 60mA available
	 * (measured on one board) for things other than tps and tusb.
	 *
	 * Boards sharing the CPU clock with CLKIN will need to prevent
	 * certain idle sleep states while the USB link is active.
	 *
	 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
	 * The actual current usage would be very board-specific. For now,
	 * it's simpler to just use an aggregate (also board-specific).
	 */
	if (x->default_a || mA < (musb->min_power << 1))
		mA = 0;

	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
	if (mA) {
		musb->is_bus_powered = 1;
		reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
	} else {
		musb->is_bus_powered = 0;
		reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
	}
	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);

	DBG(2, "draw max %d mA VBUS\n", mA);
	return 0;
}
314
315#else
316#define tusb_draw_power NULL
317#endif
318
319/* workaround for issue 13: change clock during chip idle
320 * (to be fixed in rev3 silicon) ... symptoms include disconnect
321 * or looping suspend/resume cycles
322 */
/*
 * Select the TUSB system clock source (see mode table below).
 * Part of the issue-13 workaround: the clock is switched while the
 * chip is idle.
 */
static void tusb_set_clock_source(struct musb *musb, unsigned mode)
{
	void __iomem	*tbase = musb->ctrl_base;
	u32		reg;

	reg = musb_readl(tbase, TUSB_PRCM_CONF);
	reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);

	/* 0 = refclk (clkin, XI)
	 * 1 = PHY 60 MHz (internal PLL)
	 * 2 = not supported
	 * 3 = what?
	 */
	if (mode > 0)
		reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);

	musb_writel(tbase, TUSB_PRCM_CONF, reg);

	/* FIXME tusb6010_platform_retime(mode == 0); */
}
343
344/*
345 * Idle TUSB6010 until next wake-up event; NOR access always wakes.
346 * Other code ensures that we idle unless we're connected _and_ the
347 * USB link is not suspended ... and tells us the relevant wakeup
348 * events. SW_EN for voltage is handled separately.
349 */
/*
 * Idle TUSB6010 until next wake-up event; NOR access always wakes.
 * Other code ensures that we idle unless we're connected _and_ the
 * USB link is not suspended ... and tells us the relevant wakeup
 * events. SW_EN for voltage is handled separately.
 *
 * @wakeup_enables: TUSB_PRCM_W* bits to wake on (WNORCS is forced on).
 * Applies the tusb3.0 WBUS quirk, drops to the external refclk, and
 * selects the role-appropriate VBUS comparator before idling.
 */
void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
{
	void __iomem	*tbase = musb->ctrl_base;
	u32		reg;

	if ((wakeup_enables & TUSB_PRCM_WBUS)
			&& (tusb_get_revision(musb) == TUSB_REV_30))
		tusb_wbus_quirk(musb, 1);

	tusb_set_clock_source(musb, 0);

	wakeup_enables |= TUSB_PRCM_WNORCS;
	musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);

	/* REVISIT writeup of WID implies that if WID set and ID is grounded,
	 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
	 * Presumably that's mostly to save power, hence WID is immaterial ...
	 */

	reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
	/* issue 4: when driving vbus, use hipower (vbus_det) comparator */
	if (is_host_active(musb)) {
		reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
		reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
	} else {
		reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
		reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
	}
	reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
	musb_writel(tbase, TUSB_PRCM_MNGMT, reg);

	DBG(6, "idle, wake on %02x\n", wakeup_enables);
}
383
384/*
385 * Updates cable VBUS status. Caller must take care of locking.
386 */
387int musb_platform_get_vbus_status(struct musb *musb)
388{
389 void __iomem *tbase = musb->ctrl_base;
390 u32 otg_stat, prcm_mngmt;
391 int ret = 0;
392
393 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
394 prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
395
396 /* Temporarily enable VBUS detection if it was disabled for
397 * suspend mode. Unless it's enabled otg_stat and devctl will
398 * not show correct VBUS state.
399 */
400 if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
401 u32 tmp = prcm_mngmt;
402 tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
403 musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
404 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
405 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
406 }
407
408 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
409 ret = 1;
410
411 return ret;
412}
413
414static struct timer_list musb_idle_timer;
415
/*
 * Idle-timer callback for TUSB: drop VBUS when an A-session has timed
 * out with nothing connected, then program the chip's wakeup sources
 * and let it idle if the controller is inactive.
 */
static void musb_do_idle(unsigned long _musb)
{
	struct musb	*musb = (void *)_musb;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv.state) {
	case OTG_STATE_A_WAIT_BCON:
		if ((musb->a_wait_bcon != 0)
			&& (musb->idle_timeout == 0
				|| time_after(jiffies, musb->idle_timeout))) {
			DBG(4, "Nothing connected %s, turning off VBUS\n",
					otg_state_string(musb));
		}
		/* FALLTHROUGH */
	case OTG_STATE_A_IDLE:
		tusb_source_power(musb, 0);
		/* FALLTHROUGH into default (no-op) */
	default:
		break;
	}

	if (!musb->is_active) {
		u32	wakeups;

		/* wait until khubd handles port change status */
		if (is_host_active(musb) && (musb->port1_status >> 16))
			goto done;

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
		/* No gadget bound: nothing to wake up for */
		if (is_peripheral_enabled(musb) && !musb->gadget_driver)
			wakeups = 0;
		else {
			wakeups = TUSB_PRCM_WHOSTDISCON
					| TUSB_PRCM_WBUS
					| TUSB_PRCM_WVBUS;
			if (is_otg_enabled(musb))
				wakeups |= TUSB_PRCM_WID;
		}
#else
		wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
#endif
		tusb_allow_idle(musb, wakeups);
	}
done:
	spin_unlock_irqrestore(&musb->lock, flags);
}
463
464/*
465 * Maybe put TUSB6010 into idle mode mode depending on USB link status,
466 * like "disconnected" or "suspended". We'll be woken out of it by
467 * connect, resume, or disconnect.
468 *
469 * Needs to be called as the last function everywhere where there is
470 * register access to TUSB6010 because of NOR flash wake-up.
471 * Caller should own controller spinlock.
472 *
473 * Delay because peripheral enables D+ pullup 3msec after SE0, and
474 * we don't want to treat that full speed J as a wakeup event.
475 * ... peripherals must draw only suspend current after 10 msec.
476 */
/*
 * (Re)arm the idle timer; @timeout is an absolute jiffies deadline
 * (0 selects a ~3 ms default).  Cancels the timer instead when the
 * controller is active, or in A_WAIT_BCON with no VBUS timeout set.
 * A shorter request never overrides a longer already-pending deadline
 * (tracked in the static last_timer).
 */
void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
{
	unsigned long		default_timeout = jiffies + msecs_to_jiffies(3);
	static unsigned long	last_timer;

	if (timeout == 0)
		timeout = default_timeout;

	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || ((musb->a_wait_bcon == 0)
			&& (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
		DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
		del_timer(&musb_idle_timer);
		last_timer = jiffies;
		return;
	}

	if (time_after(last_timer, timeout)) {
		if (!timer_pending(&musb_idle_timer))
			last_timer = timeout;
		else {
			DBG(4, "Longer idle timer already pending, ignoring\n");
			return;
		}
	}
	last_timer = timeout;

	DBG(4, "%s inactive, for idle timer for %lu ms\n",
		otg_state_string(musb),
		(unsigned long)jiffies_to_msecs(timeout - jiffies));
	mod_timer(&musb_idle_timer, timeout);
}
509
/* ticks of 60 MHz clock */
#define	DEVCLOCK		60000000
/* Convert milliseconds into an enabled TUSB OTG-timer value (0 -> off) */
#define OTG_TIMER_MS(msecs)	((msecs) \
		? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
				| TUSB_DEV_OTG_TIMER_ENABLE) \
		: 0)

/*
 * Start or stop sourcing VBUS.  On power-up this arms the
 * A_WAIT_VRISE OTG timer and switches the core into host mode; on
 * power-down the next OTG state depends on whether the ID pin is
 * grounded (stay A-role) or not (fall back to B_IDLE).  The switched
 * supplies (15/33 SW_EN) are always turned off here; tusb_draw_power()
 * re-enables them as needed.
 */
static void tusb_source_power(struct musb *musb, int is_on)
{
	void __iomem	*tbase = musb->ctrl_base;
	u32		conf, prcm, timer;
	u8		devctl;

	/* HDRC controls CPEN, but beware current surges during device
	 * connect.  They can trigger transient overcurrent conditions
	 * that must be ignored.
	 */

	prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
	conf = musb_readl(tbase, TUSB_DEV_CONF);
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	if (is_on) {
		if (musb->set_clock)
			musb->set_clock(musb->clock, 1);
		timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
		musb->xceiv.default_a = 1;
		musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
		devctl |= MUSB_DEVCTL_SESSION;

		conf |= TUSB_DEV_CONF_USB_HOST_MODE;
		MUSB_HST_MODE(musb);
	} else {
		u32	otg_stat;

		timer = 0;

		/* If ID pin is grounded, we want to be a_idle */
		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
		if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
			switch (musb->xceiv.state) {
			case OTG_STATE_A_WAIT_VRISE:
			case OTG_STATE_A_WAIT_BCON:
				musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
				break;
			case OTG_STATE_A_WAIT_VFALL:
				musb->xceiv.state = OTG_STATE_A_IDLE;
				break;
			default:
				musb->xceiv.state = OTG_STATE_A_IDLE;
			}
			musb->is_active = 0;
			musb->xceiv.default_a = 1;
			MUSB_HST_MODE(musb);
		} else {
			musb->is_active = 0;
			musb->xceiv.default_a = 0;
			musb->xceiv.state = OTG_STATE_B_IDLE;
			MUSB_DEV_MODE(musb);
		}

		devctl &= ~MUSB_DEVCTL_SESSION;
		conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
		if (musb->set_clock)
			musb->set_clock(musb->clock, 0);
	}
	prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);

	musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
	musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
	musb_writel(tbase, TUSB_DEV_CONF, conf);
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
		otg_state_string(musb),
		musb_readb(musb->mregs, MUSB_DEVCTL),
		musb_readl(tbase, TUSB_DEV_OTG_STAT),
		conf, prcm);
}
589
590/*
591 * Sets the mode to OTG, peripheral or host by changing the ID detection.
592 * Caller must take care of locking.
593 *
594 * Note that if a mini-A cable is plugged in the ID line will stay down as
595 * the weak ID pull-up is not able to pull the ID up.
596 *
597 * REVISIT: It would be possible to add support for changing between host
598 * and peripheral modes in non-OTG configurations by reconfiguring hardware
599 * and then setting musb->board_mode. For now, only support OTG mode.
600 */
/* Select host/peripheral/OTG role by reprogramming the PHY's ID-pin
 * detection (see the comment block above for caller/locking notes).
 */
void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;

	if (musb->board_mode != MUSB_OTG) {
		ERR("Changing mode currently only supported in OTG mode\n");
		return;
	}

	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
	phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
	dev_conf = musb_readl(tbase, TUSB_DEV_CONF);

	switch (musb_mode) {

#ifdef CONFIG_USB_MUSB_HDRC_HCD
	case MUSB_HOST:		/* Disable PHY ID detect, ground ID */
		phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf |= TUSB_DEV_CONF_ID_SEL;
		dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
		break;
#endif

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
	case MUSB_PERIPHERAL:	/* Disable PHY ID detect, keep ID pull-up on */
		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
		break;
#endif

#ifdef CONFIG_USB_MUSB_OTG
	case MUSB_OTG:		/* Use PHY ID detection */
		phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
		dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
		break;
#endif

	default:
		/* NOTE(review): on an unknown mode the registers below are
		 * still rewritten with their unmodified values (harmless,
		 * but an early return might be clearer). */
		DBG(2, "Trying to set unknown mode %i\n", musb_mode);
	}

	/* PHY OTG control writes must carry the write-protect key */
	musb_writel(tbase, TUSB_PHY_OTG_CTRL,
			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
			TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
	musb_writel(tbase, TUSB_DEV_CONF, dev_conf);

	/* Re-read ID status to sanity-check the requested role */
	otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	if ((musb_mode == MUSB_PERIPHERAL) &&
		!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
			INFO("Cannot be peripheral with mini-A cable "
			"otg_stat: %08x\n", otg_stat);
}
659
/* Handle the OTG-related interrupt sources (ID change, VBUS sense change,
 * OTG timer expiry) on behalf of tusb_interrupt().  Returns the jiffies
 * value before which the controller should not be idled (0 = no limit).
 * Called with musb->lock held (taken in tusb_interrupt).
 */
static inline unsigned long
tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
{
	u32		otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
	unsigned long	idle_timeout = 0;

	/* ID pin */
	if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
		int	default_a;

		/* ID grounded => default-A (host); ID floating => B */
		if (is_otg_enabled(musb))
			default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
		else
			default_a = is_host_enabled(musb);
		DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
		musb->xceiv.default_a = default_a;
		tusb_source_power(musb, default_a);

		/* Don't allow idling immediately */
		if (default_a)
			idle_timeout = jiffies + (HZ * 3);
	}

	/* VBUS state change */
	if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {

		/* B-dev state machine:  no vbus ~= disconnect */
		if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
				|| !is_host_enabled(musb)) {
#ifdef CONFIG_USB_MUSB_HDRC_HCD
			/* ? musb_root_disconnect(musb); */
			musb->port1_status &=
				~(USB_PORT_STAT_CONNECTION
				| USB_PORT_STAT_ENABLE
				| USB_PORT_STAT_LOW_SPEED
				| USB_PORT_STAT_HIGH_SPEED
				| USB_PORT_STAT_TEST
				);
#endif

			if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
				DBG(1, "Forcing disconnect (no interrupt)\n");
				if (musb->xceiv.state != OTG_STATE_B_IDLE) {
					/* INTR_DISCONNECT can hide... */
					musb->xceiv.state = OTG_STATE_B_IDLE;
					musb->int_usb |= MUSB_INTR_DISCONNECT;
				}
				musb->is_active = 0;
			}
			DBG(2, "vbus change, %s, otg %03x\n",
				otg_state_string(musb), otg_stat);
			idle_timeout = jiffies + (1 * HZ);
			schedule_work(&musb->irq_work);

		} else /* A-dev state machine */ {
			DBG(2, "vbus change, %s, otg %03x\n",
				otg_state_string(musb), otg_stat);

			switch (musb->xceiv.state) {
			case OTG_STATE_A_IDLE:
				DBG(2, "Got SRP, turning on VBUS\n");
				musb_set_vbus(musb, 1);

				/* CONNECT can wake if a_wait_bcon is set */
				if (musb->a_wait_bcon != 0)
					musb->is_active = 0;
				else
					musb->is_active = 1;

				/*
				 * OPT FS A TD.4.6 needs few seconds for
				 * A_WAIT_VRISE
				 */
				idle_timeout = jiffies + (2 * HZ);

				break;
			case OTG_STATE_A_WAIT_VRISE:
				/* ignore; A-session-valid < VBUS_VALID/2,
				 * we monitor this with the timer
				 */
				break;
			case OTG_STATE_A_WAIT_VFALL:
				/* REVISIT this irq triggers during short
				 * spikes caused by enumeration ...
				 */
				if (musb->vbuserr_retry) {
					musb->vbuserr_retry--;
					tusb_source_power(musb, 1);
				} else {
					musb->vbuserr_retry
						= VBUSERR_RETRY_COUNT;
					tusb_source_power(musb, 0);
				}
				break;
			default:
				break;
			}
		}
	}

	/* OTG timer expiration */
	if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
		u8	devctl;

		DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);

		switch (musb->xceiv.state) {
		case OTG_STATE_A_WAIT_VRISE:
			/* VBUS has probably been valid for a while now,
			 * but may well have bounced out of range a bit
			 */
			devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
			if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= MUSB_DEVCTL_VBUS) {
					DBG(2, "devctl %02x\n", devctl);
					break;
				}
				musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
				musb->is_active = 0;
				idle_timeout = jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon);
			} else {
				/* REVISIT report overcurrent to hub? */
				ERR("vbus too slow, devctl %02x\n", devctl);
				tusb_source_power(musb, 0);
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				idle_timeout = jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon);
			break;
		case OTG_STATE_A_SUSPEND:
			break;
		case OTG_STATE_B_WAIT_ACON:
			break;
		default:
			break;
		}
	}
	/* NOTE(review): unconditionally scheduled here even when also
	 * scheduled in the VBUS branch above — workqueue coalesces. */
	schedule_work(&musb->irq_work);

	return idle_timeout;
}
805
/* Top-level TUSB6010 interrupt handler: masks all sources, decodes wake-up,
 * OTG, DMA and endpoint interrupts, then acks and restores the mask.
 */
static irqreturn_t tusb_interrupt(int irq, void *__hci)
{
	struct musb	*musb = __hci;
	void __iomem	*tbase = musb->ctrl_base;
	unsigned long	flags, idle_timeout = 0;
	u32		int_mask, int_src;

	spin_lock_irqsave(&musb->lock, flags);

	/* Mask all interrupts to allow using both edge and level GPIO irq */
	int_mask = musb_readl(tbase, TUSB_INT_MASK);
	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);

	int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
	DBG(3, "TUSB IRQ %08x\n", int_src);

	/* low byte of int_src mirrors the MUSB usb interrupt flags */
	musb->int_usb = (u8) int_src;

	/* Acknowledge wake-up source interrupts */
	if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
		u32	reg;
		u32	i;

		if (tusb_get_revision(musb) == TUSB_REV_30)
			tusb_wbus_quirk(musb, 0);

		/* there are issues re-locking the PLL on wakeup ... */

		/* work around issue 8: poll scratch pad until NOR is ready */
		for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
			musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
			musb_writel(tbase, TUSB_SCRATCH_PAD, i);
			reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
			if (reg == i)
				break;
			DBG(6, "TUSB NOR not ready\n");
		}

		/* work around issue 13 (2nd half) */
		tusb_set_clock_source(musb, 1);

		reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
		musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
		/* any wake source except NOR chip select keeps us active */
		if (reg & ~TUSB_PRCM_WNORCS) {
			musb->is_active = 1;
			schedule_work(&musb->irq_work);
		}
		DBG(3, "wake %sactive %02x\n",
				musb->is_active ? "" : "in", reg);

		/* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
	}

	if (int_src & TUSB_INT_SRC_USB_IP_CONN)
		del_timer(&musb_idle_timer);

	/* OTG state change reports (annoyingly) not issued by Mentor core */
	if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
				| TUSB_INT_SRC_OTG_TIMEOUT
				| TUSB_INT_SRC_ID_STATUS_CHNG))
		idle_timeout = tusb_otg_ints(musb, int_src, tbase);

	/* TX dma callback must be handled here, RX dma callback is
	 * handled in tusb_omap_dma_cb.
	 */
	if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
		u32	dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
		u32	real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);

		DBG(3, "DMA IRQ %08x\n", dma_src);
		/* keep only unmasked, pending DMA sources */
		real_dma_src = ~real_dma_src & dma_src;
		if (tusb_dma_omap() && real_dma_src) {
			int	tx_source = (real_dma_src & 0xffff);
			int	i;

			for (i = 1; i <= 15; i++) {
				if (tx_source & (1 << i)) {
					DBG(3, "completing ep%i %s\n", i, "tx");
					musb_dma_completion(musb, i, 1);
				}
			}
		}
		musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
	}

	/* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
		u32	musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);

		musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
		/* high half = RX eps (shifted past ep0), low half = TX eps */
		musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
		musb->int_tx = (musb_src & 0xffff);
	} else {
		musb->int_rx = 0;
		musb->int_tx = 0;
	}

	if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
		musb_interrupt(musb);

	/* Acknowledge TUSB interrupts. Clear only non-reserved bits */
	musb_writel(tbase, TUSB_INT_SRC_CLEAR,
		int_src & ~TUSB_INT_MASK_RESERVED_BITS);

	musb_platform_try_idle(musb, idle_timeout);

	/* restore the interrupt mask saved on entry */
	musb_writel(tbase, TUSB_INT_MASK, int_mask);
	spin_unlock_irqrestore(&musb->lock, flags);

	return IRQ_HANDLED;
}
917
918static int dma_off;
919
920/*
921 * Enables TUSB6010. Caller must take care of locking.
922 * REVISIT:
923 * - Check what is unnecessary in MGC_HdrcStart()
924 */
void musb_platform_enable(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;

	/* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
	 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
	musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);

	/* Setup TUSB interrupt, disable DMA and GPIO interrupts */
	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);

	/* Clear all subsystem interrups */
	musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
	musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);

	/* Acknowledge pending interrupt(s) */
	musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);

	/* Only 0 clock cycles for minimum interrupt de-assertion time and
	 * interrupt polarity active low seems to work reliably here */
	musb_writel(tbase, TUSB_INT_CTRL_CONF,
			TUSB_INT_CTRL_CONF_INT_RELCYC(0));

	set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);

	/* maybe force into the Default-A OTG state machine */
	if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
			& TUSB_DEV_OTG_STAT_ID_STATUS))
		musb_writel(tbase, TUSB_INT_SRC_SET,
				TUSB_INT_SRC_ID_STATUS_CHNG);

	/* NOTE(review): warns when dma_off is already set, otherwise marks
	 * DMA as off — confirm this bookkeeping is intentional; the
	 * disable path below sets dma_off = 1 as well. */
	if (is_dma_capable() && dma_off)
		printk(KERN_WARNING "%s %s: dma not reactivated\n",
				__FILE__, __func__);
	else
		dma_off = 1;
}
965
966/*
967 * Disables TUSB6010. Caller must take care of locking.
968 */
void musb_platform_disable(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;

	/* FIXME stop DMA, IRQs, timers, ... */

	/* disable all IRQs */
	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
	musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);

	del_timer(&musb_idle_timer);

	/* flag DMA off; warn if the enable path never marked it active */
	if (is_dma_capable() && !dma_off) {
		printk(KERN_WARNING "%s %s: dma still active\n",
			__FILE__, __func__);
		dma_off = 1;
	}
}
989
990/*
991 * Sets up TUSB6010 CPU interface specific signals and registers
992 * Note: Settings optimized for OMAP24xx
993 */
static void __init tusb_setup_cpu_interface(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;

	/*
	 * Disable GPIO[5:0] pullups (used as output DMA requests)
	 * Don't disable GPIO[7:6] as they are needed for wake-up.
	 */
	musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);

	/* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
	musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);

	/* Turn GPIO[5:0] to DMAREQ[5:0] signals */
	musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));

	/* Burst size 16x16 bits, all six DMA requests enabled, DMA request
	 * de-assertion time 2 system clocks p 62 */
	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
		TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
		TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	/* Wait count for synchronous burst access.
	 * NOTE(review): value written is 1, though an earlier comment said
	 * "0 wait count" — confirm against the TUSB6010 datasheet. */
	musb_writel(tbase, TUSB_WAIT_COUNT, 1);
}
1020
/* Power up and configure the TUSB6010: verify its presence and revision,
 * select the clock source, program power management and the CPU interface,
 * and enable ID sensing.  Returns 0 on success or a negative errno; on
 * failure board power is switched back off.
 */
static int __init tusb_start(struct musb *musb)
{
	void __iomem	*tbase = musb->ctrl_base;
	int		ret = 0;
	unsigned long	flags;
	u32		reg;

	if (musb->board_set_power)
		ret = musb->board_set_power(1);
	if (ret != 0) {
		printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
		return ret;
	}

	spin_lock_irqsave(&musb->lock, flags);

	/* probe: the reset register reads back a known magic value */
	if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
		TUSB_PROD_TEST_RESET_VAL) {
		printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
		goto err;
	}

	/* revision < 2 chips are not supported */
	ret = tusb_print_revision(musb);
	if (ret < 2) {
		printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
				ret);
		goto err;
	}

	/* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
	 * NOR FLASH interface is used */
	musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);

	/* Select PHY free running 60MHz as a system clock */
	tusb_set_clock_source(musb, 1);

	/* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
	 * power saving, enable VBus detect and session end comparators,
	 * enable IDpullup, enable VBus charging */
	musb_writel(tbase, TUSB_PRCM_MNGMT,
		TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
		TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
		TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
		TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
		TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
	tusb_setup_cpu_interface(musb);

	/* simplify:  always sense/pullup ID pins, as if in OTG mode */
	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
	musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);

	reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
	reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
	musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);

	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;

err:
	spin_unlock_irqrestore(&musb->lock, flags);

	/* undo board_set_power(1) from above */
	if (musb->board_set_power)
		musb->board_set_power(0);

	return -ENODEV;
}
1089
1090int __init musb_platform_init(struct musb *musb)
1091{
1092 struct platform_device *pdev;
1093 struct resource *mem;
1094 void __iomem *sync;
1095 int ret;
1096
1097 pdev = to_platform_device(musb->controller);
1098
1099 /* dma address for async dma */
1100 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1101 musb->async = mem->start;
1102
1103 /* dma address for sync dma */
1104 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1105 if (!mem) {
1106 pr_debug("no sync dma resource?\n");
1107 return -ENODEV;
1108 }
1109 musb->sync = mem->start;
1110
1111 sync = ioremap(mem->start, mem->end - mem->start + 1);
1112 if (!sync) {
1113 pr_debug("ioremap for sync failed\n");
1114 return -ENOMEM;
1115 }
1116 musb->sync_va = sync;
1117
1118 /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
1119 * FIFOs at 0x600, TUSB at 0x800
1120 */
1121 musb->mregs += TUSB_BASE_OFFSET;
1122
1123 ret = tusb_start(musb);
1124 if (ret) {
1125 printk(KERN_ERR "Could not start tusb6010 (%d)\n",
1126 ret);
1127 return -ENODEV;
1128 }
1129 musb->isr = tusb_interrupt;
1130
1131 if (is_host_enabled(musb))
1132 musb->board_set_vbus = tusb_source_power;
1133 if (is_peripheral_enabled(musb))
1134 musb->xceiv.set_power = tusb_draw_power;
1135
1136 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
1137
1138 return ret;
1139}
1140
/* Tear down platform glue: stop the idle timer, power the chip off,
 * then unmap the sync DMA window mapped in musb_platform_init().
 */
int musb_platform_exit(struct musb *musb)
{
	del_timer_sync(&musb_idle_timer);

	if (musb->board_set_power)
		musb->board_set_power(0);

	iounmap(musb->sync_va);

	return 0;
}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 000000000000..ab8c96286ce6
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
1/*
2 * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Jarkko Nikula <jarkko.nikula@nokia.com>
6 * Tony Lindgren <tony@atomide.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
#ifndef __TUSB6010_H__
#define __TUSB6010_H__

extern u8 tusb_get_revision(struct musb *musb);

#ifdef CONFIG_USB_TUSB6010
#define musb_in_tusb()			1
#else
#define musb_in_tusb()			0
#endif

#ifdef CONFIG_USB_TUSB_OMAP_DMA
#define tusb_dma_omap()			1
#else
#define tusb_dma_omap()			0
#endif

/* VLYNQ control register. 32-bit at register offset 0x004 (the VLYNQ
 * module itself sits at chip offset 0x000). */
#define TUSB_VLYNQ_CTRL			0x004

/* Mentor Graphics OTG core registers. 8,- 16- and 32-bit at offset 0x400 */
#define TUSB_BASE_OFFSET		0x400

/* FIFO registers 32-bit at offset 0x600 */
#define TUSB_FIFO_BASE			0x600

/* Device System & Control registers. 32-bit at offset 0x800 */
#define TUSB_SYS_REG_BASE		0x800

#define TUSB_DEV_CONF			(TUSB_SYS_REG_BASE + 0x000)
#define		TUSB_DEV_CONF_USB_HOST_MODE		(1 << 16)
#define		TUSB_DEV_CONF_PROD_TEST_MODE		(1 << 15)
#define		TUSB_DEV_CONF_SOFT_ID			(1 << 1)
#define		TUSB_DEV_CONF_ID_SEL			(1 << 0)

#define TUSB_PHY_OTG_CTRL_ENABLE	(TUSB_SYS_REG_BASE + 0x004)
#define TUSB_PHY_OTG_CTRL		(TUSB_SYS_REG_BASE + 0x008)
#define		TUSB_PHY_OTG_CTRL_WRPROTECT		(0xa5 << 24)
#define		TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP		(1 << 23)
#define		TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN	(1 << 19)
#define		TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN	(1 << 18)
#define		TUSB_PHY_OTG_CTRL_TESTM2		(1 << 17)
#define		TUSB_PHY_OTG_CTRL_TESTM1		(1 << 16)
#define		TUSB_PHY_OTG_CTRL_TESTM0		(1 << 15)
#define		TUSB_PHY_OTG_CTRL_TX_DATA2		(1 << 14)
#define		TUSB_PHY_OTG_CTRL_TX_GZ2		(1 << 13)
#define		TUSB_PHY_OTG_CTRL_TX_ENABLE2		(1 << 12)
#define		TUSB_PHY_OTG_CTRL_DM_PULLDOWN		(1 << 11)
#define		TUSB_PHY_OTG_CTRL_DP_PULLDOWN		(1 << 10)
#define		TUSB_PHY_OTG_CTRL_OSC_EN		(1 << 9)
#define		TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v)	(((v) & 3) << 7)
#define		TUSB_PHY_OTG_CTRL_PD			(1 << 6)
#define		TUSB_PHY_OTG_CTRL_PLL_ON		(1 << 5)
#define		TUSB_PHY_OTG_CTRL_EXT_RPU		(1 << 4)
#define		TUSB_PHY_OTG_CTRL_PWR_GOOD		(1 << 3)
#define		TUSB_PHY_OTG_CTRL_RESET			(1 << 2)
#define		TUSB_PHY_OTG_CTRL_SUSPENDM		(1 << 1)
#define		TUSB_PHY_OTG_CTRL_CLK_MODE		(1 << 0)

/* OTG status register */
#define TUSB_DEV_OTG_STAT		(TUSB_SYS_REG_BASE + 0x00c)
#define		TUSB_DEV_OTG_STAT_PWR_CLK_GOOD		(1 << 8)
#define		TUSB_DEV_OTG_STAT_SESS_END		(1 << 7)
#define		TUSB_DEV_OTG_STAT_SESS_VALID		(1 << 6)
#define		TUSB_DEV_OTG_STAT_VBUS_VALID		(1 << 5)
#define		TUSB_DEV_OTG_STAT_VBUS_SENSE		(1 << 4)
#define		TUSB_DEV_OTG_STAT_ID_STATUS		(1 << 3)
#define		TUSB_DEV_OTG_STAT_HOST_DISCON		(1 << 2)
#define		TUSB_DEV_OTG_STAT_LINE_STATE		(3 << 0)
#define		TUSB_DEV_OTG_STAT_DP_ENABLE		(1 << 1)
#define		TUSB_DEV_OTG_STAT_DM_ENABLE		(1 << 0)

#define TUSB_DEV_OTG_TIMER		(TUSB_SYS_REG_BASE + 0x010)
#	define TUSB_DEV_OTG_TIMER_ENABLE		(1 << 31)
#	define TUSB_DEV_OTG_TIMER_VAL(v)		((v) & 0x07ffffff)
#define TUSB_PRCM_REV			(TUSB_SYS_REG_BASE + 0x014)

/* PRCM configuration register */
#define TUSB_PRCM_CONF			(TUSB_SYS_REG_BASE + 0x018)
#define		TUSB_PRCM_CONF_SFW_CPEN		(1 << 24)
#define		TUSB_PRCM_CONF_SYS_CLKSEL(v)	(((v) & 3) << 16)

/* PRCM management register */
#define TUSB_PRCM_MNGMT			(TUSB_SYS_REG_BASE + 0x01c)
#define		TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v)	(((v) & 0xf) << 25)
#define		TUSB_PRCM_MNGMT_SRP_FIX_EN		(1 << 24)
#define		TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v)	(((v) & 0xf) << 20)
#define		TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN	(1 << 19)
#define		TUSB_PRCM_MNGMT_DFT_CLK_DIS		(1 << 18)
#define		TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS		(1 << 17)
#define		TUSB_PRCM_MNGMT_OTG_SESS_END_EN		(1 << 10)
#define		TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN		(1 << 9)
#define		TUSB_PRCM_MNGMT_OTG_ID_PULLUP		(1 << 8)
#define		TUSB_PRCM_MNGMT_15_SW_EN		(1 << 4)
#define		TUSB_PRCM_MNGMT_33_SW_EN		(1 << 3)
#define		TUSB_PRCM_MNGMT_5V_CPEN			(1 << 2)
#define		TUSB_PRCM_MNGMT_PM_IDLE			(1 << 1)
#define		TUSB_PRCM_MNGMT_DEV_IDLE		(1 << 0)

/* Wake-up source clear and mask registers */
#define TUSB_PRCM_WAKEUP_SOURCE		(TUSB_SYS_REG_BASE + 0x020)
#define TUSB_PRCM_WAKEUP_CLEAR		(TUSB_SYS_REG_BASE + 0x028)
#define TUSB_PRCM_WAKEUP_MASK		(TUSB_SYS_REG_BASE + 0x02c)
#define		TUSB_PRCM_WAKEUP_RESERVED_BITS	(0xffffe << 13)
#define		TUSB_PRCM_WGPIO_7	(1 << 12)
#define		TUSB_PRCM_WGPIO_6	(1 << 11)
#define		TUSB_PRCM_WGPIO_5	(1 << 10)
#define		TUSB_PRCM_WGPIO_4	(1 << 9)
#define		TUSB_PRCM_WGPIO_3	(1 << 8)
#define		TUSB_PRCM_WGPIO_2	(1 << 7)
#define		TUSB_PRCM_WGPIO_1	(1 << 6)
#define		TUSB_PRCM_WGPIO_0	(1 << 5)
#define		TUSB_PRCM_WHOSTDISCON	(1 << 4)	/* Host disconnect */
#define		TUSB_PRCM_WBUS		(1 << 3)	/* USB bus resume */
#define		TUSB_PRCM_WNORCS	(1 << 2)	/* NOR chip select */
#define		TUSB_PRCM_WVBUS		(1 << 1)	/* OTG PHY VBUS */
#define		TUSB_PRCM_WID		(1 << 0)	/* OTG PHY ID detect */

#define TUSB_PULLUP_1_CTRL		(TUSB_SYS_REG_BASE + 0x030)
#define TUSB_PULLUP_2_CTRL		(TUSB_SYS_REG_BASE + 0x034)
#define TUSB_INT_CTRL_REV		(TUSB_SYS_REG_BASE + 0x038)
#define TUSB_INT_CTRL_CONF		(TUSB_SYS_REG_BASE + 0x03c)
#define TUSB_USBIP_INT_SRC		(TUSB_SYS_REG_BASE + 0x040)
#define TUSB_USBIP_INT_SET		(TUSB_SYS_REG_BASE + 0x044)
#define TUSB_USBIP_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x048)
#define TUSB_USBIP_INT_MASK		(TUSB_SYS_REG_BASE + 0x04c)
#define TUSB_DMA_INT_SRC		(TUSB_SYS_REG_BASE + 0x050)
#define TUSB_DMA_INT_SET		(TUSB_SYS_REG_BASE + 0x054)
#define TUSB_DMA_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x058)
#define TUSB_DMA_INT_MASK		(TUSB_SYS_REG_BASE + 0x05c)
#define TUSB_GPIO_INT_SRC		(TUSB_SYS_REG_BASE + 0x060)
#define TUSB_GPIO_INT_SET		(TUSB_SYS_REG_BASE + 0x064)
#define TUSB_GPIO_INT_CLEAR		(TUSB_SYS_REG_BASE + 0x068)
#define TUSB_GPIO_INT_MASK		(TUSB_SYS_REG_BASE + 0x06c)

/* NOR flash interrupt source registers */
#define TUSB_INT_SRC			(TUSB_SYS_REG_BASE + 0x070)
#define TUSB_INT_SRC_SET		(TUSB_SYS_REG_BASE + 0x074)
#define TUSB_INT_SRC_CLEAR		(TUSB_SYS_REG_BASE + 0x078)
#define TUSB_INT_MASK			(TUSB_SYS_REG_BASE + 0x07c)
#define		TUSB_INT_SRC_TXRX_DMA_DONE		(1 << 24)
#define		TUSB_INT_SRC_USB_IP_CORE		(1 << 17)
#define		TUSB_INT_SRC_OTG_TIMEOUT		(1 << 16)
#define		TUSB_INT_SRC_VBUS_SENSE_CHNG		(1 << 15)
#define		TUSB_INT_SRC_ID_STATUS_CHNG		(1 << 14)
#define		TUSB_INT_SRC_DEV_WAKEUP			(1 << 13)
#define		TUSB_INT_SRC_DEV_READY			(1 << 12)
#define		TUSB_INT_SRC_USB_IP_TX			(1 << 9)
#define		TUSB_INT_SRC_USB_IP_RX			(1 << 8)
#define		TUSB_INT_SRC_USB_IP_VBUS_ERR		(1 << 7)
#define		TUSB_INT_SRC_USB_IP_VBUS_REQ		(1 << 6)
#define		TUSB_INT_SRC_USB_IP_DISCON		(1 << 5)
#define		TUSB_INT_SRC_USB_IP_CONN		(1 << 4)
#define		TUSB_INT_SRC_USB_IP_SOF			(1 << 3)
#define		TUSB_INT_SRC_USB_IP_RST_BABBLE		(1 << 2)
#define		TUSB_INT_SRC_USB_IP_RESUME		(1 << 1)
#define		TUSB_INT_SRC_USB_IP_SUSPEND		(1 << 0)

/* NOR flash interrupt registers reserved bits. Must be written as 0 */
#define		TUSB_INT_MASK_RESERVED_17		(0x3fff << 17)
#define		TUSB_INT_MASK_RESERVED_13		(1 << 13)
#define		TUSB_INT_MASK_RESERVED_8		(0xf << 8)
#define		TUSB_INT_SRC_RESERVED_26		(0x1f << 26)
#define		TUSB_INT_SRC_RESERVED_18		(0x3f << 18)
#define		TUSB_INT_SRC_RESERVED_10		(0x03 << 10)

/* Reserved bits for NOR flash interrupt mask and clear register */
#define		TUSB_INT_MASK_RESERVED_BITS	(TUSB_INT_MASK_RESERVED_17 | \
						TUSB_INT_MASK_RESERVED_13 | \
						TUSB_INT_MASK_RESERVED_8)

/* Reserved bits for NOR flash interrupt status register */
#define		TUSB_INT_SRC_RESERVED_BITS	(TUSB_INT_SRC_RESERVED_26 | \
						TUSB_INT_SRC_RESERVED_18 | \
						TUSB_INT_SRC_RESERVED_10)

#define TUSB_GPIO_REV			(TUSB_SYS_REG_BASE + 0x080)
#define TUSB_GPIO_CONF			(TUSB_SYS_REG_BASE + 0x084)
#define TUSB_DMA_CTRL_REV		(TUSB_SYS_REG_BASE + 0x100)
#define TUSB_DMA_REQ_CONF		(TUSB_SYS_REG_BASE + 0x104)
#define TUSB_EP0_CONF			(TUSB_SYS_REG_BASE + 0x108)
#define TUSB_DMA_EP_MAP			(TUSB_SYS_REG_BASE + 0x148)

/* Offsets from each ep base register */
#define TUSB_EP_TX_OFFSET		0x10c	/* EP_IN in docs */
#define TUSB_EP_RX_OFFSET		0x14c	/* EP_OUT in docs */
#define TUSB_EP_MAX_PACKET_SIZE_OFFSET	0x188

#define TUSB_WAIT_COUNT			(TUSB_SYS_REG_BASE + 0x1c8)
#define TUSB_SCRATCH_PAD		(TUSB_SYS_REG_BASE + 0x1c4)
#define TUSB_PROD_TEST_RESET		(TUSB_SYS_REG_BASE + 0x1d8)

/* Device System & Control register bitfields */
#define TUSB_INT_CTRL_CONF_INT_RELCYC(v)	(((v) & 0x7) << 18)
#define TUSB_INT_CTRL_CONF_INT_POLARITY		(1 << 17)
#define TUSB_INT_CTRL_CONF_INT_MODE		(1 << 16)
#define TUSB_GPIO_CONF_DMAREQ(v)		(((v) & 0x3f) << 24)
#define TUSB_DMA_REQ_CONF_BURST_SIZE(v)		(((v) & 3) << 26)
#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v)		(((v) & 0x3f) << 20)
#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v)	(((v) & 0xf) << 16)
#define TUSB_EP0_CONFIG_SW_EN			(1 << 8)
#define TUSB_EP0_CONFIG_DIR_TX			(1 << 7)
#define TUSB_EP0_CONFIG_XFR_SIZE(v)		((v) & 0x7f)
#define TUSB_EP_CONFIG_SW_EN			(1 << 31)
#define TUSB_EP_CONFIG_XFR_SIZE(v)		((v) & 0x7fffffff)
/* magic value read back from TUSB_PROD_TEST_RESET when the chip is present */
#define TUSB_PROD_TEST_RESET_VAL		0xa596
#define TUSB_EP_FIFO(ep)			(TUSB_FIFO_BASE + (ep) * 0x20)

#define TUSB_DIDR1_LO				(TUSB_SYS_REG_BASE + 0x1f8)
#define TUSB_DIDR1_HI				(TUSB_SYS_REG_BASE + 0x1fc)
#define		TUSB_DIDR1_HI_CHIP_REV(v)		(((v) >> 17) & 0xf)
#define			TUSB_DIDR1_HI_REV_20		0
#define			TUSB_DIDR1_HI_REV_30		1
#define			TUSB_DIDR1_HI_REV_31		2

/* chip revision codes as reported by tusb_get_revision() */
#define TUSB_REV_10	0x10
#define TUSB_REV_20	0x20
#define TUSB_REV_30	0x30
#define TUSB_REV_31	0x31

#endif	/* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 000000000000..52f7f29cebda
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
1/*
2 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/usb.h>
16#include <linux/platform_device.h>
17#include <linux/dma-mapping.h>
18#include <asm/arch/dma.h>
19#include <asm/arch/mux.h>
20
21#include "musb_core.h"
22
/* Per-channel driver state stashed in dma_channel->private_data */
#define to_chdat(c)		((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ		5	/* REVISIT: Really 6, but req5 not OK */
26
/* Per-DMA-channel state: ties one MUSB endpoint to one OMAP DMA channel. */
struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;		/* TUSB register base */
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;		/* 1 = TX endpoint, 0 = RX */
	struct musb_hw_ep	*hw_ep;

	int			ch;		/* OMAP DMA channel number */
	s8			dmareq;		/* -1 when not assigned */
	s8			sync_dev;	/* -1 when not assigned */

	struct tusb_omap_dma	*tusb_dma;	/* back-pointer to controller */

	void __iomem		*dma_addr;

	u32			len;		/* total requested length */
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;	/* length programmed into DMA */
	u32			completed_len;
};
49
/* Controller-wide DMA state; embeds the generic dma_controller interface. */
struct tusb_omap_dma {
	struct dma_controller		controller;
	struct musb			*musb;
	void __iomem			*tbase;

	/* shared channel/request used when not in multichannel mode */
	int				ch;
	s8				dmareq;
	s8				sync_dev;
	unsigned			multichannel:1;
};
60
61static int tusb_omap_dma_start(struct dma_controller *c)
62{
63 struct tusb_omap_dma *tusb_dma;
64
65 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
66
67 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
68
69 return 0;
70}
71
72static int tusb_omap_dma_stop(struct dma_controller *c)
73{
74 struct tusb_omap_dma *tusb_dma;
75
76 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
77
78 /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
79
80 return 0;
81}
82
83/*
84 * Allocate dmareq0 to the current channel unless it's already taken
85 */
86static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
87{
88 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
89
90 if (reg != 0) {
91 DBG(3, "ep%i dmareq0 is busy for ep%i\n",
92 chdat->epnum, reg & 0xf);
93 return -EAGAIN;
94 }
95
96 if (chdat->tx)
97 reg = (1 << 4) | chdat->epnum;
98 else
99 reg = chdat->epnum;
100
101 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
102
103 return 0;
104}
105
106static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
107{
108 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
109
110 if ((reg & 0xf) != chdat->epnum) {
111 printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
112 chdat->epnum, reg & 0xf);
113 return;
114 }
115 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
116}
117
118/*
119 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
120 * musb_gadget.c.
121 */
/* OMAP DMA completion callback: accounts for data actually transferred,
 * finishes any sub-32-byte remainder with PIO, releases the shared dmareq
 * when applicable, and completes RX transfers / terminates short TX packets.
 */
static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;
	int			ch;

	spin_lock_irqsave(&musb->lock, flags);

	if (tusb_dma->multichannel)
		ch = chdat->ch;
	else
		ch = tusb_dma->ch;

	if (ch_status != OMAP_DMA_BLOCK_IRQ)
		printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);

	DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		ch, ch_status);

	/* how many bytes the endpoint still reports outstanding */
	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", chdat->ch,
			remaining);
		remaining = 0;
	}

	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		DBG(3, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			/* flush cache before the FIFO reads the buffer */
			dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
					chdat->transfer_len, DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			musb_read_fifo(hw_ep, pio, buf);
			/* invalidate cache after writing behind DMA's back */
			dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
					chdat->transfer_len, DMA_FROM_DEVICE);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	/* Handle only RX callbacks here. TX callbacks must be handled based
	 * on the TUSB DMA status interrupt.
	 * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
	 * interrupt for RX and TX.
	 */
	if (!chdat->tx)
		musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			DBG(3, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}
219
/*
 * Program and start an OMAP DMA transfer for one MUSB endpoint.
 * Returns true when DMA was set up and started, false when the caller
 * must fall back to PIO (declared int, but the values used are
 * true/false -- NOTE(review): confirm against the musb channel_program
 * contract).
 */
static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch *chdat = to_chdat(channel);
	struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
	struct musb *musb = chdat->musb;
	struct musb_hw_ep *hw_ep = chdat->hw_ep;
	void __iomem *mbase = musb->mregs;
	void __iomem *ep_conf = hw_ep->conf;
	dma_addr_t fifo = hw_ep->fifo_sync;
	struct omap_dma_channel_params dma_params;
	u32 dma_remaining;
	int src_burst, dst_burst;
	u16 csr;
	int ch;
	s8 dmareq;
	s8 sync_dev;

	/* Reject odd addresses, transfers shorter than one 32-byte DMA
	 * frame, and transfers larger than a packet */
	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		DBG(2, "Busy %s dma ch%i, not using: %08x\n",
			chdat->tx ? "tx" : "rx", chdat->ch,
			dma_remaining);
		return false;
	}

	/* DMA moves whole 32-byte frames; the <32-byte tail is done with
	 * PIO in tusb_omap_dma_cb() */
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	if (tusb_dma->multichannel) {
		/* Dedicated channel/dmareq were reserved at allocate time */
		ch = chdat->ch;
		dmareq = chdat->dmareq;
		sync_dev = chdat->sync_dev;
	} else {
		/* Claim the single shared dmareq0 for this endpoint */
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			DBG(3, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (tusb_dma->ch < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}

		ch = tusb_dma->ch;
		dmareq = tusb_dma->dmareq;
		sync_dev = tusb_dma->sync_dev;
		/* Retarget the shared channel's callback at this endpoint */
		omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = (void __iomem *)dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx)
		dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
	else
		dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
		dma_params.elem_count = 8; /* Elements in frame */
	} else {
		dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
		dma_params.elem_count = 16; /* Elements in frame */
		/* Unaligned buffers go through the async FIFO address */
		fifo = hw_ep->fifo_async;
	}

	dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */

	DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		ch, dma_addr, chdat->transfer_len, len,
		chdat->transfer_packet_sz, packet_sz);

	/*
	 * Prepare omap DMA for transfer
	 */
	if (chdat->tx) {
		/* Memory -> FIFO: source increments, destination loops over
		 * the 32-byte FIFO window via double-indexing */
		dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.src_start = (unsigned long)dma_addr;
		dma_params.src_ei = 0;
		dma_params.src_fi = 0;

		dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
		dma_params.dst_start = (unsigned long)fifo;
		dma_params.dst_ei = 1;
		dma_params.dst_fi = -31; /* Loop 32 byte window */

		dma_params.trigger = sync_dev;
		dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
		dma_params.src_or_dst_synch = 0; /* Dest sync */

		src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */
		dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */
	} else {
		/* FIFO -> memory: mirror image of the tx setup */
		dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
		dma_params.src_start = (unsigned long)fifo;
		dma_params.src_ei = 1;
		dma_params.src_fi = -31; /* Loop 32 byte window */

		dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
		dma_params.dst_start = (unsigned long)dma_addr;
		dma_params.dst_ei = 0;
		dma_params.dst_fi = 0;

		dma_params.trigger = sync_dev;
		dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
		dma_params.src_or_dst_synch = 1; /* Source sync */

		src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
		dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
	}

	DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		(dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		dma_params.src_start, dma_params.dst_start);

	omap_set_dma_params(ch, &dma_params);
	omap_set_dma_src_burst_mode(ch, src_burst);
	omap_set_dma_dest_burst_mode(ch, dst_burst);
	omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	if (chdat->tx) {
		musb_ep_select(mbase, chdat->epnum);
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		musb_ep_select(mbase, chdat->epnum);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/*
	 * Start DMA transfer
	 */
	omap_start_dma(ch);

	/* Program the TUSB transfer size last: writing XFR_SIZE arms the
	 * endpoint for the transfer */
	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
			chdat->transfer_packet_sz);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
			chdat->transfer_packet_sz << 16);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}
419
420static int tusb_omap_dma_abort(struct dma_channel *channel)
421{
422 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
423 struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
424
425 if (!tusb_dma->multichannel) {
426 if (tusb_dma->ch >= 0) {
427 omap_stop_dma(tusb_dma->ch);
428 omap_free_dma(tusb_dma->ch);
429 tusb_dma->ch = -1;
430 }
431
432 tusb_dma->dmareq = -1;
433 tusb_dma->sync_dev = -1;
434 }
435
436 channel->status = MUSB_DMA_STATUS_FREE;
437
438 return 0;
439}
440
441static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
442{
443 u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
444 int i, dmareq_nr = -1;
445
446 const int sync_dev[6] = {
447 OMAP24XX_DMA_EXT_DMAREQ0,
448 OMAP24XX_DMA_EXT_DMAREQ1,
449 OMAP242X_DMA_EXT_DMAREQ2,
450 OMAP242X_DMA_EXT_DMAREQ3,
451 OMAP242X_DMA_EXT_DMAREQ4,
452 OMAP242X_DMA_EXT_DMAREQ5,
453 };
454
455 for (i = 0; i < MAX_DMAREQ; i++) {
456 int cur = (reg & (0xf << (i * 5))) >> (i * 5);
457 if (cur == 0) {
458 dmareq_nr = i;
459 break;
460 }
461 }
462
463 if (dmareq_nr == -1)
464 return -EAGAIN;
465
466 reg |= (chdat->epnum << (dmareq_nr * 5));
467 if (chdat->tx)
468 reg |= ((1 << 4) << (dmareq_nr * 5));
469 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
470
471 chdat->dmareq = dmareq_nr;
472 chdat->sync_dev = sync_dev[chdat->dmareq];
473
474 return 0;
475}
476
477static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
478{
479 u32 reg;
480
481 if (!chdat || chdat->dmareq < 0)
482 return;
483
484 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
485 reg &= ~(0x1f << (chdat->dmareq * 5));
486 musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
487
488 chdat->dmareq = -1;
489 chdat->sync_dev = -1;
490}
491
/* Pool of channel structs handed out by tusb_omap_dma_allocate(), one per
 * dmareq line; populated in dma_controller_create() */
static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
493
/*
 * Allocate a DMA channel for one endpoint direction. Unmasks the TUSB DMA
 * interrupt for the endpoint, grabs a free slot from dma_channel_pool, and
 * in multichannel mode also reserves a dedicated dmareq line plus an OMAP
 * DMA channel (the shared-channel case requests one lazily, the first time
 * through). Returns the channel, or NULL when nothing is available.
 */
static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	const char *dev_name;
	struct tusb_omap_dma *tusb_dma;
	struct musb *musb;
	void __iomem *tbase;
	struct dma_channel *channel = NULL;
	struct tusb_omap_dma_ch *chdat = NULL;
	u32 reg;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->musb;
	tbase = musb->ctrl_base;

	/* Unmask this endpoint's DMA interrupt (tx bits 0.., rx bits 15..) */
	reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
	if (tx)
		reg &= ~(1 << hw_ep->epnum);
	else
		reg &= ~(1 << (hw_ep->epnum + 15));
	musb_writel(tbase, TUSB_DMA_INT_MASK, reg);

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	/* Grab the first unused pool slot; UNKNOWN marks an unused slot */
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	if (tx) {
		chdat->tx = 1;
		dev_name = "TUSB transmit";
	} else {
		chdat->tx = 0;
		dev_name = "TUSB receive";
	}

	chdat->musb = tusb_dma->musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->dmareq = -1;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (tusb_dma->multichannel) {
		/* Dedicated dmareq + OMAP channel for this endpoint */
		ret = tusb_omap_dma_allocate_dmareq(chdat);
		if (ret != 0)
			goto free_dmareq;

		ret = omap_request_dma(chdat->sync_dev, dev_name,
				tusb_omap_dma_cb, channel, &chdat->ch);
		if (ret != 0)
			goto free_dmareq;
	} else if (tusb_dma->ch == -1) {
		/* First allocation in shared mode: request the one shared
		 * OMAP channel on dmareq0 */
		tusb_dma->dmareq = 0;
		tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;

		/* Callback data gets set later in the shared dmareq case */
		ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
				tusb_omap_dma_cb, NULL, &tusb_dma->ch);
		if (ret != 0)
			goto free_dmareq;

		chdat->dmareq = -1;
		chdat->ch = -1;
	}

	DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		chdat->ch >= 0 ? "dedicated" : "shared",
		chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
		chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
		chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	/* Return the slot to the pool */
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}
599
600static void tusb_omap_dma_release(struct dma_channel *channel)
601{
602 struct tusb_omap_dma_ch *chdat = to_chdat(channel);
603 struct musb *musb = chdat->musb;
604 void __iomem *tbase = musb->ctrl_base;
605 u32 reg;
606
607 DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
608
609 reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
610 if (chdat->tx)
611 reg |= (1 << chdat->epnum);
612 else
613 reg |= (1 << (chdat->epnum + 15));
614 musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
615
616 reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
617 if (chdat->tx)
618 reg |= (1 << chdat->epnum);
619 else
620 reg |= (1 << (chdat->epnum + 15));
621 musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
622
623 channel->status = MUSB_DMA_STATUS_UNKNOWN;
624
625 if (chdat->ch >= 0) {
626 omap_stop_dma(chdat->ch);
627 omap_free_dma(chdat->ch);
628 chdat->ch = -1;
629 }
630
631 if (chdat->dmareq >= 0)
632 tusb_omap_dma_free_dmareq(chdat);
633
634 channel = NULL;
635}
636
637void dma_controller_destroy(struct dma_controller *c)
638{
639 struct tusb_omap_dma *tusb_dma;
640 int i;
641
642 tusb_dma = container_of(c, struct tusb_omap_dma, controller);
643 for (i = 0; i < MAX_DMAREQ; i++) {
644 struct dma_channel *ch = dma_channel_pool[i];
645 if (ch) {
646 kfree(ch->private_data);
647 kfree(ch);
648 }
649 }
650
651 if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0)
652 omap_free_dma(tusb_dma->ch);
653
654 kfree(tusb_dma);
655}
656
657struct dma_controller *__init
658dma_controller_create(struct musb *musb, void __iomem *base)
659{
660 void __iomem *tbase = musb->ctrl_base;
661 struct tusb_omap_dma *tusb_dma;
662 int i;
663
664 /* REVISIT: Get dmareq lines used from board-*.c */
665
666 musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
667 musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
668
669 musb_writel(tbase, TUSB_DMA_REQ_CONF,
670 TUSB_DMA_REQ_CONF_BURST_SIZE(2)
671 | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
672 | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
673
674 tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
675 if (!tusb_dma)
676 goto cleanup;
677
678 tusb_dma->musb = musb;
679 tusb_dma->tbase = musb->ctrl_base;
680
681 tusb_dma->ch = -1;
682 tusb_dma->dmareq = -1;
683 tusb_dma->sync_dev = -1;
684
685 tusb_dma->controller.start = tusb_omap_dma_start;
686 tusb_dma->controller.stop = tusb_omap_dma_stop;
687 tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
688 tusb_dma->controller.channel_release = tusb_omap_dma_release;
689 tusb_dma->controller.channel_program = tusb_omap_dma_program;
690 tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
691
692 if (tusb_get_revision(musb) >= TUSB_REV_30)
693 tusb_dma->multichannel = 1;
694
695 for (i = 0; i < MAX_DMAREQ; i++) {
696 struct dma_channel *ch;
697 struct tusb_omap_dma_ch *chdat;
698
699 ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
700 if (!ch)
701 goto cleanup;
702
703 dma_channel_pool[i] = ch;
704
705 chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
706 if (!chdat)
707 goto cleanup;
708
709 ch->status = MUSB_DMA_STATUS_UNKNOWN;
710 ch->private_data = chdat;
711 }
712
713 return &tusb_dma->controller;
714
715cleanup:
716 dma_controller_destroy(&tusb_dma->controller);
717
718 return NULL;
719}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c1767fc8..70338f4ec918 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED
499config USB_SERIAL_SIERRAWIRELESS 499config USB_SERIAL_SIERRAWIRELESS
500 tristate "USB Sierra Wireless Driver" 500 tristate "USB Sierra Wireless Driver"
501 help 501 help
502 Say M here if you want to use a Sierra Wireless device (if 502 Say M here if you want to use Sierra Wireless devices.
503 using an PC 5220 or AC580 please use the Airprime driver 503
503 instead). 504 Many devices have a feature known as TRU-Install, for those devices
505 to work properly the USB Storage Sierra feature must be enabled.
505 506
506 To compile this driver as a module, choose M here: the 507 To compile this driver as a module, choose M here: the
507 module will be called sierra. 508 module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 838717250145..984f6eff4c47 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = {
563 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, 563 { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
564 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, 564 { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, 565 { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
566 { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
566 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, 567 { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
567 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, 568 { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
568 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, 569 { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = {
637 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) }, 638 { USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
638 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 639 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
639 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 640 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
641 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
640 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 642 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
641 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, 643 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
642 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, 644 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = {
646 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 648 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
647 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), 649 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
648 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 650 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
651 { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
652 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
653 { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
654 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
649 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, 655 { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
650 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, 656 { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
651 { }, /* Optional parameter entry */ 657 { }, /* Optional parameter entry */
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea44dcf9..382265bba969 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
524#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */ 524#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
525#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */ 525#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
526#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */ 526#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
527#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
527#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ 528#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
529#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
528 530
529/* 531/*
530 * Definitions for ID TECH (www.idt-net.com) devices 532 * Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
815#define OLIMEX_VID 0x15BA 817#define OLIMEX_VID 0x15BA
816#define OLIMEX_ARM_USB_OCD_PID 0x0003 818#define OLIMEX_ARM_USB_OCD_PID 0x0003
817 819
820/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
821/* FTDI 2332C Dual channel device, side A=245 FIFO (JTAG), Side B=RS232 UART */
822#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
823#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
824
818/* www.elsterelectricity.com Elster Unicom III Optical Probe */ 825/* www.elsterelectricity.com Elster Unicom III Optical Probe */
819#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */ 826#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
820 827
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95f2b0f..e143198aeb02 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -186,6 +186,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
186#define BANDRICH_VENDOR_ID 0x1A8D 186#define BANDRICH_VENDOR_ID 0x1A8D
187#define BANDRICH_PRODUCT_C100_1 0x1002 187#define BANDRICH_PRODUCT_C100_1 0x1002
188#define BANDRICH_PRODUCT_C100_2 0x1003 188#define BANDRICH_PRODUCT_C100_2 0x1003
189#define BANDRICH_PRODUCT_1004 0x1004
190#define BANDRICH_PRODUCT_1005 0x1005
191#define BANDRICH_PRODUCT_1006 0x1006
192#define BANDRICH_PRODUCT_1007 0x1007
193#define BANDRICH_PRODUCT_1008 0x1008
194#define BANDRICH_PRODUCT_1009 0x1009
195#define BANDRICH_PRODUCT_100A 0x100a
196
197#define BANDRICH_PRODUCT_100B 0x100b
198#define BANDRICH_PRODUCT_100C 0x100c
199#define BANDRICH_PRODUCT_100D 0x100d
200#define BANDRICH_PRODUCT_100E 0x100e
201
202#define BANDRICH_PRODUCT_100F 0x100f
203#define BANDRICH_PRODUCT_1010 0x1010
204#define BANDRICH_PRODUCT_1011 0x1011
205#define BANDRICH_PRODUCT_1012 0x1012
189 206
190#define AMOI_VENDOR_ID 0x1614 207#define AMOI_VENDOR_ID 0x1614
191#define AMOI_PRODUCT_9508 0x0800 208#define AMOI_PRODUCT_9508 0x0800
@@ -197,6 +214,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
197#define TELIT_VENDOR_ID 0x1bc7 214#define TELIT_VENDOR_ID 0x1bc7
198#define TELIT_PRODUCT_UC864E 0x1003 215#define TELIT_PRODUCT_UC864E 0x1003
199 216
217/* ZTE PRODUCTS */
218#define ZTE_VENDOR_ID 0x19d2
219#define ZTE_PRODUCT_MF628 0x0015
220
200static struct usb_device_id option_ids[] = { 221static struct usb_device_id option_ids[] = {
201 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 222 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
202 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 223 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -302,12 +323,28 @@ static struct usb_device_id option_ids[] = {
302 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) }, 323 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
303 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, 324 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
304 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, 325 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
326 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
327 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
328 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
329 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
330 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
331 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
332 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
333 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
334 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
335 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
336 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
337 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
338 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
339 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
340 { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
305 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 341 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
306 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 342 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
307 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 343 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
308 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 344 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
309 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ 345 { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
310 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, 346 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
347 { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
311 { } /* Terminating entry */ 348 { } /* Terminating entry */
312}; 349};
313MODULE_DEVICE_TABLE(usb, option_ids); 350MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +383,7 @@ static struct usb_serial_driver option_1port_device = {
346 .read_int_callback = option_instat_callback, 383 .read_int_callback = option_instat_callback,
347}; 384};
348 385
349#ifdef CONFIG_USB_DEBUG
350static int debug; 386static int debug;
351#else
352#define debug 0
353#endif
354 387
355/* per port private data */ 388/* per port private data */
356 389
@@ -954,8 +987,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
954MODULE_VERSION(DRIVER_VERSION); 987MODULE_VERSION(DRIVER_VERSION);
955MODULE_LICENSE("GPL"); 988MODULE_LICENSE("GPL");
956 989
957#ifdef CONFIG_USB_DEBUG
958module_param(debug, bool, S_IRUGO | S_IWUSR); 990module_param(debug, bool, S_IRUGO | S_IWUSR);
959MODULE_PARM_DESC(debug, "Debug messages"); 991MODULE_PARM_DESC(debug, "Debug messages");
960#endif
961
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446ad625..1ede1441cb1b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = {
90 { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) }, 90 { USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) }, 91 { USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) }, 92 { USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
93 { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
94 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) }, 93 { USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
95 { } /* Terminating entry */ 94 { } /* Terminating entry */
96}; 95};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbcf7a22..a3bd039c78e9 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
107#define COREGA_VENDOR_ID 0x07aa 107#define COREGA_VENDOR_ID 0x07aa
108#define COREGA_PRODUCT_ID 0x002a 108#define COREGA_PRODUCT_ID 0x002a
109 109
110/* HL HL-340 (ID: 4348:5523) */
111#define HL340_VENDOR_ID 0x4348
112#define HL340_PRODUCT_ID 0x5523
113
114/* Y.C. Cable U.S.A., Inc - USB to RS-232 */ 110/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
115#define YCCABLE_VENDOR_ID 0x05ad 111#define YCCABLE_VENDOR_ID 0x05ad
116#define YCCABLE_PRODUCT_ID 0x0fba 112#define YCCABLE_PRODUCT_ID 0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f1523ec56..706033753adb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 14 Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
15*/ 15*/
16 16
17#define DRIVER_VERSION "v.1.2.9c" 17#define DRIVER_VERSION "v.1.2.13a"
18#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" 18#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
19#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 19#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
20 20
@@ -31,6 +31,7 @@
31#define SWIMS_USB_REQUEST_SetPower 0x00 31#define SWIMS_USB_REQUEST_SetPower 0x00
32#define SWIMS_USB_REQUEST_SetNmea 0x07 32#define SWIMS_USB_REQUEST_SetNmea 0x07
33#define SWIMS_USB_REQUEST_SetMode 0x0B 33#define SWIMS_USB_REQUEST_SetMode 0x0B
34#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
34#define SWIMS_SET_MODE_Modem 0x0001 35#define SWIMS_SET_MODE_Modem 0x0001
35 36
36/* per port private data */ 37/* per port private data */
@@ -40,18 +41,11 @@
40 41
41static int debug; 42static int debug;
42static int nmea; 43static int nmea;
43static int truinstall = 1;
44
45enum devicetype {
46 DEVICE_3_PORT = 0,
47 DEVICE_1_PORT = 1,
48 DEVICE_INSTALLER = 2,
49};
50 44
51static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) 45static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
52{ 46{
53 int result; 47 int result;
54 dev_dbg(&udev->dev, "%s", "SET POWER STATE\n"); 48 dev_dbg(&udev->dev, "%s", __func__);
55 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 49 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
56 SWIMS_USB_REQUEST_SetPower, /* __u8 request */ 50 SWIMS_USB_REQUEST_SetPower, /* __u8 request */
57 USB_TYPE_VENDOR, /* __u8 request type */ 51 USB_TYPE_VENDOR, /* __u8 request type */
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
63 return result; 57 return result;
64} 58}
65 59
66static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
67{
68 int result;
69 dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
70 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
71 SWIMS_USB_REQUEST_SetMode, /* __u8 request */
72 USB_TYPE_VENDOR, /* __u8 request type */
73 eSWocMode, /* __u16 value */
74 0x0000, /* __u16 index */
75 NULL, /* void *data */
76 0, /* __u16 size */
77 USB_CTRL_SET_TIMEOUT); /* int timeout */
78 return result;
79}
80
81static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) 60static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
82{ 61{
83 int result; 62 int result;
84 dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n"); 63 dev_dbg(&udev->dev, "%s", __func__);
85 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 64 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
86 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ 65 SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
87 USB_TYPE_VENDOR, /* __u8 request type */ 66 USB_TYPE_VENDOR, /* __u8 request type */
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
97{ 76{
98 int result; 77 int result;
99 int *num_ports = usb_get_serial_data(serial); 78 int *num_ports = usb_get_serial_data(serial);
79 dev_dbg(&serial->dev->dev, "%s", __func__);
100 80
101 result = *num_ports; 81 result = *num_ports;
102 82
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
110 90
111static int sierra_calc_interface(struct usb_serial *serial) 91static int sierra_calc_interface(struct usb_serial *serial)
112{ 92{
113 int interface; 93 int interface;
114 struct usb_interface *p_interface; 94 struct usb_interface *p_interface;
115 struct usb_host_interface *p_host_interface; 95 struct usb_host_interface *p_host_interface;
96 dev_dbg(&serial->dev->dev, "%s", __func__);
116 97
117 /* Get the interface structure pointer from the serial struct */ 98 /* Get the interface structure pointer from the serial struct */
118 p_interface = serial->interface; 99 p_interface = serial->interface;
119 100
120 /* Get a pointer to the host interface structure */ 101 /* Get a pointer to the host interface structure */
121 p_host_interface = p_interface->cur_altsetting; 102 p_host_interface = p_interface->cur_altsetting;
122 103
123 /* read the interface descriptor for this active altsetting 104 /* read the interface descriptor for this active altsetting
124 * to find out the interface number we are on 105 * to find out the interface number we are on
125 */ 106 */
126 interface = p_host_interface->desc.bInterfaceNumber; 107 interface = p_host_interface->desc.bInterfaceNumber;
127 108
128 return interface; 109 return interface;
129} 110}
130 111
131static int sierra_probe(struct usb_serial *serial, 112static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial,
135 struct usb_device *udev; 116 struct usb_device *udev;
136 int *num_ports; 117 int *num_ports;
137 u8 ifnum; 118 u8 ifnum;
119 u8 numendpoints;
120
121 dev_dbg(&serial->dev->dev, "%s", __func__);
138 122
139 num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL); 123 num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
140 if (!num_ports) 124 if (!num_ports)
141 return -ENOMEM; 125 return -ENOMEM;
142 126
143 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; 127 ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
128 numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
144 udev = serial->dev; 129 udev = serial->dev;
145 130
146 /* Figure out the interface number from the serial structure */ 131 /* Figure out the interface number from the serial structure */
147 ifnum = sierra_calc_interface(serial); 132 ifnum = sierra_calc_interface(serial);
148
149 /*
150 * If this interface supports more than 1 alternate
151 * select the 2nd one
152 */
153 if (serial->interface->num_altsetting == 2) {
154 dev_dbg(&udev->dev,
155 "Selecting alt setting for interface %d\n",
156 ifnum);
157 133
158 /* We know the alternate setting is 1 for the MC8785 */ 134 /*
159 usb_set_interface(udev, ifnum, 1); 135 * If this interface supports more than 1 alternate
160 } 136 * select the 2nd one
137 */
138 if (serial->interface->num_altsetting == 2) {
139 dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
140 ifnum);
141 /* We know the alternate setting is 1 for the MC8785 */
142 usb_set_interface(udev, ifnum, 1);
143 }
161 144
162 /* Check if in installer mode */ 145 /* Dummy interface present on some SKUs should be ignored */
163 if (truinstall && id->driver_info == DEVICE_INSTALLER) { 146 if (ifnum == 0x99)
164 dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
165 result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
166 /* Don't bind to the device when in installer mode */
167 kfree(num_ports);
168 return -EIO;
169 } else if (id->driver_info == DEVICE_1_PORT)
170 *num_ports = 1;
171 else if (ifnum == 0x99)
172 *num_ports = 0; 147 *num_ports = 0;
148 else if (numendpoints <= 3)
149 *num_ports = 1;
173 else 150 else
174 *num_ports = 3; 151 *num_ports = (numendpoints-1)/2;
152
175 /* 153 /*
176 * save off our num_ports info so that we can use it in the 154 * save off our num_ports info so that we can use it in the
177 * calc_num_ports callback 155 * calc_num_ports callback
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = {
187 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ 165 { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
188 { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ 166 { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
189 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ 167 { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
168 { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
190 { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ 169 { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
191 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ 170 { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
192 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ 171 { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
193 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ 172 { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
194 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */ 173 /* Sierra Wireless C597 */
174 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
175 /* Sierra Wireless Device */
176 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
177 { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
195 178
196 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ 179 { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
197 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ 180 { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
198 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ 181 { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
199 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ 182 { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
200 { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */ 183 { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
201 { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ 184 { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
202 { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */ 185 { USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
203 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ 186 { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
204 { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ 187 { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
205 { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/ 188 { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
206 { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/ 189 { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
207 { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/ 190 { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
191 { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
192 { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
193 { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
208 { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ 194 { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
209 { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ 195 { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
210 { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ 196 { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
211 { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ 197 { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
212 { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ 198 { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
213 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ 199 { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
214 { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ 200 { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
215 { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */ 201 { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
216 202 /* Sierra Wireless C885 */
217 { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */ 203 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
218 { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */ 204 /* Sierra Wireless Device */
219 205 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
220 { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */ 206 /* Sierra Wireless Device */
221 { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */ 207 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
208
209 { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
210 { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
222 211
223 { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
224 { } 212 { }
225}; 213};
226MODULE_DEVICE_TABLE(usb, id_table); 214MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty,
268 if (portdata->rts_state) 256 if (portdata->rts_state)
269 val |= 0x02; 257 val |= 0x02;
270 258
271 /* Determine which port is targeted */ 259 /* If composite device then properly report interface */
272 if (port->bulk_out_endpointAddress == 2) 260 if (serial->num_ports == 1)
273 interface = 0; 261 interface = sierra_calc_interface(serial);
274 else if (port->bulk_out_endpointAddress == 4) 262
275 interface = 1; 263 /* Otherwise the need to do non-composite mapping */
276 else if (port->bulk_out_endpointAddress == 5) 264 else {
277 interface = 2; 265 if (port->bulk_out_endpointAddress == 2)
266 interface = 0;
267 else if (port->bulk_out_endpointAddress == 4)
268 interface = 1;
269 else if (port->bulk_out_endpointAddress == 5)
270 interface = 2;
271 }
278 272
279 return usb_control_msg(serial->dev, 273 return usb_control_msg(serial->dev,
280 usb_rcvctrlpipe(serial->dev, 0), 274 usb_rcvctrlpipe(serial->dev, 0),
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial)
713static struct usb_serial_driver sierra_device = { 707static struct usb_serial_driver sierra_device = {
714 .driver = { 708 .driver = {
715 .owner = THIS_MODULE, 709 .owner = THIS_MODULE,
716 .name = "sierra1", 710 .name = "sierra",
717 }, 711 },
718 .description = "Sierra USB modem", 712 .description = "Sierra USB modem",
719 .id_table = id_table, 713 .id_table = id_table,
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
769MODULE_VERSION(DRIVER_VERSION); 763MODULE_VERSION(DRIVER_VERSION);
770MODULE_LICENSE("GPL"); 764MODULE_LICENSE("GPL");
771 765
772module_param(truinstall, bool, 0); 766module_param(nmea, bool, S_IRUGO | S_IWUSR);
773MODULE_PARM_DESC(truinstall, "TRU-Install support");
774
775module_param(nmea, bool, 0);
776MODULE_PARM_DESC(nmea, "NMEA streaming"); 767MODULE_PARM_DESC(nmea, "NMEA streaming");
777 768
778#ifdef CONFIG_USB_DEBUG
779module_param(debug, bool, S_IRUGO | S_IWUSR); 769module_param(debug, bool, S_IRUGO | S_IWUSR);
780MODULE_PARM_DESC(debug, "Debug messages"); 770MODULE_PARM_DESC(debug, "Debug messages");
781#endif
782
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531eedea..b157c48e8b78 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial)
122 122
123 dbg("%s", __func__); 123 dbg("%s", __func__);
124 124
125 if (serial == NULL)
126 return;
127
128 for (i = 0; i < serial->num_ports; ++i) 125 for (i = 0; i < serial->num_ports; ++i)
129 serial_table[serial->minor + i] = NULL; 126 serial_table[serial->minor + i] = NULL;
130} 127}
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref)
142 serial->type->shutdown(serial); 139 serial->type->shutdown(serial);
143 140
144 /* return the minor range that this device had */ 141 /* return the minor range that this device had */
145 return_serial(serial); 142 if (serial->minor != SERIAL_TTY_NO_MINOR)
143 return_serial(serial);
146 144
147 for (i = 0; i < serial->num_ports; ++i) 145 for (i = 0; i < serial->num_ports; ++i)
148 serial->port[i]->port.count = 0; 146 serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
575 serial->interface = interface; 573 serial->interface = interface;
576 kref_init(&serial->kref); 574 kref_init(&serial->kref);
577 mutex_init(&serial->disc_mutex); 575 mutex_init(&serial->disc_mutex);
576 serial->minor = SERIAL_TTY_NO_MINOR;
578 577
579 return serial; 578 return serial;
580} 579}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d9249632ae1..c76034672c18 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA
146 on the resulting scsi device node returns the Karma to normal 146 on the resulting scsi device node returns the Karma to normal
147 operation. 147 operation.
148 148
149config USB_STORAGE_SIERRA
150 bool "Sierra Wireless TRU-Install Feature Support"
151 depends on USB_STORAGE
152 help
153 Say Y here to include additional code to support Sierra Wireless
154 products with the TRU-Install feature (e.g., AC597E, AC881U).
155
156 This code switches the Sierra Wireless device from being in
157 Mass Storage mode to Modem mode. It also has the ability to
158 support host software upgrades should full Linux support be added
159 to TRU-Install.
160
149config USB_STORAGE_CYPRESS_ATACB 161config USB_STORAGE_CYPRESS_ATACB
150 bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" 162 bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
151 depends on USB_STORAGE 163 depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c766c53..bc3415b475c9 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
21usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o 21usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
22usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o 22usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
23usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o 23usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
24usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
24usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o 25usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
25 26
26usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ 27usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 000000000000..4359a2cb42df
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
1#include <scsi/scsi.h>
2#include <scsi/scsi_host.h>
3#include <scsi/scsi_cmnd.h>
4#include <scsi/scsi_device.h>
5#include <linux/usb.h>
6
7#include "usb.h"
8#include "transport.h"
9#include "protocol.h"
10#include "scsiglue.h"
11#include "sierra_ms.h"
12#include "debug.h"
13
14#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
15#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
16#define SWIMS_USB_INDEX_SetMode 0x0000
17#define SWIMS_SET_MODE_Modem 0x0001
18
19#define TRU_NORMAL 0x01
20#define TRU_FORCE_MS 0x02
21#define TRU_FORCE_MODEM 0x03
22
23static unsigned int swi_tru_install = 1;
24module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
25MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
26 " 2=Force CD-Rom, 3=Force Modem)");
27
28struct swoc_info {
29 __u8 rev;
30 __u8 reserved[8];
31 __u16 LinuxSKU;
32 __u16 LinuxVer;
33 __u8 reserved2[47];
34} __attribute__((__packed__));
35
36static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
37{
38 if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
39 (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
40 return true;
41 else
42 return false;
43}
44
45static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
46{
47 int result;
48 US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
49 result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
50 SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
51 USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
52 eSWocMode, /* __u16 value */
53 0x0000, /* __u16 index */
54 NULL, /* void *data */
55 0, /* __u16 size */
56 USB_CTRL_SET_TIMEOUT); /* int timeout */
57 return result;
58}
59
60
61static int sierra_get_swoc_info(struct usb_device *udev,
62 struct swoc_info *swocInfo)
63{
64 int result;
65
66 US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
67
68 result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
69 SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
70 USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
71 0, /* __u16 value */
72 0, /* __u16 index */
73 (void *) swocInfo, /* void *data */
74 sizeof(struct swoc_info), /* __u16 size */
75 USB_CTRL_SET_TIMEOUT); /* int timeout */
76
77 swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
78 swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
79 return result;
80}
81
82static void debug_swoc(struct swoc_info *swocInfo)
83{
84 US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
85 US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
86 US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
87}
88
89
90static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
91 char *buf)
92{
93 struct swoc_info *swocInfo;
94 struct usb_interface *intf = to_usb_interface(dev);
95 struct usb_device *udev = interface_to_usbdev(intf);
96 int result;
97 if (swi_tru_install == TRU_FORCE_MS) {
98 result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
99 } else {
100 swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
101 if (!swocInfo) {
102 US_DEBUGP("SWIMS: Allocation failure\n");
103 snprintf(buf, PAGE_SIZE, "Error\n");
104 return -ENOMEM;
105 }
106 result = sierra_get_swoc_info(udev, swocInfo);
107 if (result < 0) {
108 US_DEBUGP("SWIMS: failed SWoC query\n");
109 kfree(swocInfo);
110 snprintf(buf, PAGE_SIZE, "Error\n");
111 return -EIO;
112 }
113 debug_swoc(swocInfo);
114 result = snprintf(buf, PAGE_SIZE,
115 "REV=%02d SKU=%04X VER=%04X\n",
116 swocInfo->rev,
117 swocInfo->LinuxSKU,
118 swocInfo->LinuxVer);
119 kfree(swocInfo);
120 }
121 return result;
122}
123static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
124
125int sierra_ms_init(struct us_data *us)
126{
127 int result, retries;
128 signed long delay_t;
129 struct swoc_info *swocInfo;
130 struct usb_device *udev;
131 struct Scsi_Host *sh;
132 struct scsi_device *sd;
133
134 delay_t = 2;
135 retries = 3;
136 result = 0;
137 udev = us->pusb_dev;
138
139 sh = us_to_host(us);
140 sd = scsi_get_host_dev(sh);
141
142 US_DEBUGP("SWIMS: sierra_ms_init called\n");
143
144 /* Force Modem mode */
145 if (swi_tru_install == TRU_FORCE_MODEM) {
146 US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
147 result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
148 if (result < 0)
149 US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
150 return -EIO;
151 }
152 /* Force Mass Storage mode (keep CD-Rom) */
153 else if (swi_tru_install == TRU_FORCE_MS) {
154 US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
155 goto complete;
156 }
157 /* Normal TRU-Install Logic */
158 else {
159 US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
160
161 swocInfo = kmalloc(sizeof(struct swoc_info),
162 GFP_KERNEL);
163 if (!swocInfo) {
164 US_DEBUGP("SWIMS: %s", "Allocation failure\n");
165 return -ENOMEM;
166 }
167
168 retries = 3;
169 do {
170 retries--;
171 result = sierra_get_swoc_info(udev, swocInfo);
172 if (result < 0) {
173 US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
174 schedule_timeout_uninterruptible(2*HZ);
175 }
176 } while (retries && result < 0);
177
178 if (result < 0) {
179 US_DEBUGP("SWIMS: %s",
180 "Completely failed SWoC query\n");
181 kfree(swocInfo);
182 return -EIO;
183 }
184
185 debug_swoc(swocInfo);
186
187 /* If there is not Linux software on the TRU-Install device
188 * then switch to modem mode
189 */
190 if (!containsFullLinuxPackage(swocInfo)) {
191 US_DEBUGP("SWIMS: %s",
192 "Switching to Modem Mode\n");
193 result = sierra_set_ms_mode(udev,
194 SWIMS_SET_MODE_Modem);
195 if (result < 0)
196 US_DEBUGP("SWIMS: Failed to switch modem\n");
197 kfree(swocInfo);
198 return -EIO;
199 }
200 kfree(swocInfo);
201 }
202complete:
203 result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
204
205 return USB_STOR_TRANSPORT_GOOD;
206}
207
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 000000000000..bb48634ac1fc
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
1#ifndef _SIERRA_MS_H_
2#define _SIERRA_MS_H_
3extern int sierra_ms_init(struct us_data *us);
4#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb7b2b0..3523a0bfa0ff 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1032 1032
1033 /* try to compute the actual residue, based on how much data 1033 /* try to compute the actual residue, based on how much data
1034 * was really transferred and what the device tells us */ 1034 * was really transferred and what the device tells us */
1035 if (residue) { 1035 if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
1036 if (!(us->fflags & US_FL_IGNORE_RESIDUE)) { 1036
1037 /* Heuristically detect devices that generate bogus residues
1038 * by seeing what happens with INQUIRY and READ CAPACITY
1039 * commands.
1040 */
1041 if (bcs->Status == US_BULK_STAT_OK &&
1042 scsi_get_resid(srb) == 0 &&
1043 ((srb->cmnd[0] == INQUIRY &&
1044 transfer_length == 36) ||
1045 (srb->cmnd[0] == READ_CAPACITY &&
1046 transfer_length == 8))) {
1047 us->fflags |= US_FL_IGNORE_RESIDUE;
1048
1049 } else {
1037 residue = min(residue, transfer_length); 1050 residue = min(residue, transfer_length);
1038 scsi_set_resid(srb, max(scsi_get_resid(srb), 1051 scsi_set_resid(srb, max(scsi_get_resid(srb),
1039 (int) residue)); 1052 (int) residue));
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f55aa96..ba412e68d474 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
225 US_SC_DEVICE, US_PR_DEVICE, NULL, 225 US_SC_DEVICE, US_PR_DEVICE, NULL,
226 US_FL_MAX_SECTORS_64 ), 226 US_FL_MAX_SECTORS_64 ),
227 227
228/* Reported by Cedric Godin <cedric@belbone.be> */
229UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
230 "Nokia",
231 "5300",
232 US_SC_DEVICE, US_PR_DEVICE, NULL,
233 US_FL_FIX_CAPACITY ),
234
228/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ 235/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
229UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, 236UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
230 "SMSC", 237 "SMSC",
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
356 US_FL_FIX_CAPACITY), 363 US_FL_FIX_CAPACITY),
357 364
358/* Reported by Emil Larsson <emil@swip.net> */ 365/* Reported by Emil Larsson <emil@swip.net> */
359UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110, 366UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
360 "NIKON", 367 "NIKON",
361 "NIKON DSC D80", 368 "NIKON DSC D80",
362 US_SC_DEVICE, US_PR_DEVICE, NULL, 369 US_SC_DEVICE, US_PR_DEVICE, NULL,
363 US_FL_FIX_CAPACITY), 370 US_FL_FIX_CAPACITY),
364 371
365/* Reported by Ortwin Glueck <odi@odi.ch> */ 372/* Reported by Ortwin Glueck <odi@odi.ch> */
366UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110, 373UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
367 "NIKON", 374 "NIKON",
368 "NIKON DSC D40", 375 "NIKON DSC D40",
369 US_SC_DEVICE, US_PR_DEVICE, NULL, 376 US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
1185 US_SC_DEVICE, US_PR_DEVICE, NULL, 1192 US_SC_DEVICE, US_PR_DEVICE, NULL,
1186 US_FL_FIX_INQUIRY ), 1193 US_FL_FIX_INQUIRY ),
1187 1194
1195/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
1196UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
1197 "Simple Tech/Datafab",
1198 "CF+SM Reader",
1199 US_SC_DEVICE, US_PR_DEVICE, NULL,
1200 US_FL_IGNORE_RESIDUE ),
1201
1188/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant 1202/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
1189 * to the USB storage specification in two ways: 1203 * to the USB storage specification in two ways:
1190 * - They tell us they are using transport protocol CBI. In reality they 1204 * - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1562 US_SC_DEVICE, US_PR_DEVICE, NULL, 1576 US_SC_DEVICE, US_PR_DEVICE, NULL,
1563 0), 1577 0),
1564 1578
1579#ifdef CONFIG_USB_STORAGE_SIERRA
1565/* Reported by Kevin Lloyd <linux@sierrawireless.com> 1580/* Reported by Kevin Lloyd <linux@sierrawireless.com>
1566 * Entry is needed for the initializer function override, 1581 * Entry is needed for the initializer function override,
1567 * which instructs the device to load as a modem 1582 * which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
1570UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, 1585UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
1571 "Sierra Wireless", 1586 "Sierra Wireless",
1572 "USB MMC Storage", 1587 "USB MMC Storage",
1573 US_SC_DEVICE, US_PR_DEVICE, NULL, 1588 US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
1574 US_FL_IGNORE_DEVICE), 1589 0),
1590#endif
1575 1591
1576/* Reported by Jaco Kroon <jaco@kroon.co.za> 1592/* Reported by Jaco Kroon <jaco@kroon.co.za>
1577 * The usb-storage module found on the Digitech GNX4 (and supposedly other 1593 * The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
1743 US_FL_FIX_CAPACITY), 1759 US_FL_FIX_CAPACITY),
1744 1760
1745/* 1761/*
1762 * Patch by Jost Diederichs <jost@qdusa.com>
1763 */
1764UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
1765 "Motorola Inc.",
1766 "Motorola Phone (RAZRV3xx)",
1767 US_SC_DEVICE, US_PR_DEVICE, NULL,
1768 US_FL_FIX_CAPACITY),
1769
1770/*
1746 * Patch by Constantin Baranov <const@tltsu.ru> 1771 * Patch by Constantin Baranov <const@tltsu.ru>
1747 * Report by Andreas Koenecke. 1772 * Report by Andreas Koenecke.
1748 * Motorola ROKR Z6. 1773 * Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
1767 US_SC_DEVICE, US_PR_DEVICE, NULL, 1792 US_SC_DEVICE, US_PR_DEVICE, NULL,
1768 US_FL_FIX_CAPACITY ), 1793 US_FL_FIX_CAPACITY ),
1769 1794
1795/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
1796UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
1797 "iRiver",
1798 "MP3 T10",
1799 US_SC_DEVICE, US_PR_DEVICE, NULL,
1800 US_FL_IGNORE_RESIDUE ),
1801
1770/* 1802/*
1771 * David Härdeman <david@2gen.com> 1803 * David Härdeman <david@2gen.com>
1772 * The key makes the SCSI stack print confusing (but harmless) messages 1804 * The key makes the SCSI stack print confusing (but harmless) messages
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851be985..73679aa506de 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
102#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB 102#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
103#include "cypress_atacb.h" 103#include "cypress_atacb.h"
104#endif 104#endif
105#ifdef CONFIG_USB_STORAGE_SIERRA
106#include "sierra_ms.h"
107#endif
105 108
106/* Some informational data */ 109/* Some informational data */
107MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); 110MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index e7018a2f56af..9c5925927ece 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -39,7 +39,9 @@
39#endif 39#endif
40 40
41#if defined(CONFIG_ARCH_AT91) 41#if defined(CONFIG_ARCH_AT91)
42#define ATMEL_LCDFB_FBINFO_DEFAULT FBINFO_DEFAULT 42#define ATMEL_LCDFB_FBINFO_DEFAULT (FBINFO_DEFAULT \
43 | FBINFO_PARTIAL_PAN_OK \
44 | FBINFO_HWACCEL_YPAN)
43 45
44static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo, 46static inline void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
45 struct fb_var_screeninfo *var) 47 struct fb_var_screeninfo *var)
@@ -177,7 +179,7 @@ static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
177 .type = FB_TYPE_PACKED_PIXELS, 179 .type = FB_TYPE_PACKED_PIXELS,
178 .visual = FB_VISUAL_TRUECOLOR, 180 .visual = FB_VISUAL_TRUECOLOR,
179 .xpanstep = 0, 181 .xpanstep = 0,
180 .ypanstep = 0, 182 .ypanstep = 1,
181 .ywrapstep = 0, 183 .ywrapstep = 0,
182 .accel = FB_ACCEL_NONE, 184 .accel = FB_ACCEL_NONE,
183}; 185};
@@ -240,9 +242,11 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
240{ 242{
241 struct fb_info *info = sinfo->info; 243 struct fb_info *info = sinfo->info;
242 struct fb_var_screeninfo *var = &info->var; 244 struct fb_var_screeninfo *var = &info->var;
245 unsigned int smem_len;
243 246
244 info->fix.smem_len = (var->xres_virtual * var->yres_virtual 247 smem_len = (var->xres_virtual * var->yres_virtual
245 * ((var->bits_per_pixel + 7) / 8)); 248 * ((var->bits_per_pixel + 7) / 8));
249 info->fix.smem_len = max(smem_len, sinfo->smem_len);
246 250
247 info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len, 251 info->screen_base = dma_alloc_writecombine(info->device, info->fix.smem_len,
248 (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL); 252 (dma_addr_t *)&info->fix.smem_start, GFP_KERNEL);
@@ -794,6 +798,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
794 sinfo->default_monspecs = pdata_sinfo->default_monspecs; 798 sinfo->default_monspecs = pdata_sinfo->default_monspecs;
795 sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control; 799 sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
796 sinfo->guard_time = pdata_sinfo->guard_time; 800 sinfo->guard_time = pdata_sinfo->guard_time;
801 sinfo->smem_len = pdata_sinfo->smem_len;
797 sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight; 802 sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
798 sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode; 803 sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
799 } else { 804 } else {
diff --git a/drivers/video/aty/radeon_accel.c b/drivers/video/aty/radeon_accel.c
index 4d13f68436e6..aa95f8350242 100644
--- a/drivers/video/aty/radeon_accel.c
+++ b/drivers/video/aty/radeon_accel.c
@@ -55,6 +55,10 @@ static void radeonfb_prim_fillrect(struct radeonfb_info *rinfo,
55 OUTREG(DP_WRITE_MSK, 0xffffffff); 55 OUTREG(DP_WRITE_MSK, 0xffffffff);
56 OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM)); 56 OUTREG(DP_CNTL, (DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM));
57 57
58 radeon_fifo_wait(2);
59 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
60 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
61
58 radeon_fifo_wait(2); 62 radeon_fifo_wait(2);
59 OUTREG(DST_Y_X, (region->dy << 16) | region->dx); 63 OUTREG(DST_Y_X, (region->dy << 16) | region->dx);
60 OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height); 64 OUTREG(DST_WIDTH_HEIGHT, (region->width << 16) | region->height);
@@ -116,6 +120,10 @@ static void radeonfb_prim_copyarea(struct radeonfb_info *rinfo,
116 OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0) 120 OUTREG(DP_CNTL, (xdir>=0 ? DST_X_LEFT_TO_RIGHT : 0)
117 | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0)); 121 | (ydir>=0 ? DST_Y_TOP_TO_BOTTOM : 0));
118 122
123 radeon_fifo_wait(2);
124 OUTREG(DSTCACHE_CTLSTAT, RB2D_DC_FLUSH_ALL);
125 OUTREG(WAIT_UNTIL, (WAIT_2D_IDLECLEAN | WAIT_DMA_GUI_IDLE));
126
119 radeon_fifo_wait(3); 127 radeon_fifo_wait(3);
120 OUTREG(SRC_Y_X, (sy << 16) | sx); 128 OUTREG(SRC_Y_X, (sy << 16) | sx);
121 OUTREG(DST_Y_X, (dy << 16) | dx); 129 OUTREG(DST_Y_X, (dy << 16) | dx);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 33859934a8e4..c6299e8a041d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -2518,7 +2518,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
2518 c = vc->vc_video_erase_char; 2518 c = vc->vc_video_erase_char;
2519 vc->vc_video_erase_char = 2519 vc->vc_video_erase_char =
2520 ((c & 0xfe00) >> 1) | (c & 0xff); 2520 ((c & 0xfe00) >> 1) | (c & 0xff);
2521 c = vc->vc_def_color; 2521 c = vc->vc_scrl_erase_char;
2522 vc->vc_scrl_erase_char = 2522 vc->vc_scrl_erase_char =
2523 ((c & 0xFE00) >> 1) | (c & 0xFF); 2523 ((c & 0xFE00) >> 1) | (c & 0xFF);
2524 vc->vc_attr >>= 1; 2524 vc->vc_attr >>= 1;
@@ -2551,7 +2551,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
2551 if (vc->vc_can_do_color) { 2551 if (vc->vc_can_do_color) {
2552 vc->vc_video_erase_char = 2552 vc->vc_video_erase_char =
2553 ((c & 0xff00) << 1) | (c & 0xff); 2553 ((c & 0xff00) << 1) | (c & 0xff);
2554 c = vc->vc_def_color; 2554 c = vc->vc_scrl_erase_char;
2555 vc->vc_scrl_erase_char = 2555 vc->vc_scrl_erase_char =
2556 ((c & 0xFF00) << 1) | (c & 0xFF); 2556 ((c & 0xFF00) << 1) | (c & 0xFF);
2557 vc->vc_attr <<= 1; 2557 vc->vc_attr <<= 1;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index de1b1365279b..a6e38e9ea73f 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -92,7 +92,7 @@ struct fbcon_ops {
92#define attr_fgcol(fgshift,s) \ 92#define attr_fgcol(fgshift,s) \
93 (((s) >> (fgshift)) & 0x0f) 93 (((s) >> (fgshift)) & 0x0f)
94#define attr_bgcol(bgshift,s) \ 94#define attr_bgcol(bgshift,s) \
95 (((s) >> (bgshift)) & 0x07) 95 (((s) >> (bgshift)) & 0x0f)
96 96
97/* Monochrome */ 97/* Monochrome */
98#define attr_bold(s) \ 98#define attr_bold(s) \
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bd320a2bfb7c..fb51197d1c98 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -479,6 +479,10 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
479 base_plane_width = machine_data->fsl_diu_info[0]->var.xres; 479 base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
480 base_plane_height = machine_data->fsl_diu_info[0]->var.yres; 480 base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
481 481
482 if (mfbi->x_aoi_d < 0)
483 mfbi->x_aoi_d = 0;
484 if (mfbi->y_aoi_d < 0)
485 mfbi->y_aoi_d = 0;
482 switch (index) { 486 switch (index) {
483 case 0: 487 case 0:
484 if (mfbi->x_aoi_d != 0) 488 if (mfbi->x_aoi_d != 0)
@@ -778,6 +782,22 @@ static void unmap_video_memory(struct fb_info *info)
778} 782}
779 783
780/* 784/*
785 * Using the fb_var_screeninfo in fb_info we set the aoi of this
786 * particular framebuffer. It is a light version of fsl_diu_set_par.
787 */
788static int fsl_diu_set_aoi(struct fb_info *info)
789{
790 struct fb_var_screeninfo *var = &info->var;
791 struct mfb_info *mfbi = info->par;
792 struct diu_ad *ad = mfbi->ad;
793
794 /* AOI should not be greater than display size */
795 ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
796 ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
797 return 0;
798}
799
800/*
781 * Using the fb_var_screeninfo in fb_info we set the resolution of this 801 * Using the fb_var_screeninfo in fb_info we set the resolution of this
782 * particular framebuffer. This function alters the fb_fix_screeninfo stored 802 * particular framebuffer. This function alters the fb_fix_screeninfo stored
783 * in fb_info. It does not alter var in fb_info since we are using that 803 * in fb_info. It does not alter var in fb_info since we are using that
@@ -817,11 +837,11 @@ static int fsl_diu_set_par(struct fb_info *info)
817 diu_ops.get_pixel_format(var->bits_per_pixel, 837 diu_ops.get_pixel_format(var->bits_per_pixel,
818 machine_data->monitor_port); 838 machine_data->monitor_port);
819 ad->addr = cpu_to_le32(info->fix.smem_start); 839 ad->addr = cpu_to_le32(info->fix.smem_start);
820 ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) | 840 ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
821 var->xres) | mfbi->g_alpha; 841 var->xres_virtual) | mfbi->g_alpha;
822 /* fix me. AOI should not be greater than display size */ 842 /* AOI should not be greater than display size */
823 ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres); 843 ad->aoi_size = cpu_to_le32((var->yres << 16) | var->xres);
824 ad->offset_xyi = 0; 844 ad->offset_xyi = cpu_to_le32((var->yoffset << 16) | var->xoffset);
825 ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d); 845 ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
826 846
827 /* Disable chroma keying function */ 847 /* Disable chroma keying function */
@@ -921,6 +941,8 @@ static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
921 else 941 else
922 info->var.vmode &= ~FB_VMODE_YWRAP; 942 info->var.vmode &= ~FB_VMODE_YWRAP;
923 943
944 fsl_diu_set_aoi(info);
945
924 return 0; 946 return 0;
925} 947}
926 948
@@ -989,7 +1011,7 @@ static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
989 pr_debug("set AOI display offset of index %d to (%d,%d)\n", 1011 pr_debug("set AOI display offset of index %d to (%d,%d)\n",
990 mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d); 1012 mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
991 fsl_diu_check_var(&info->var, info); 1013 fsl_diu_check_var(&info->var, info);
992 fsl_diu_set_par(info); 1014 fsl_diu_set_aoi(info);
993 break; 1015 break;
994 case MFB_GET_AOID: 1016 case MFB_GET_AOID:
995 aoi_d.x_aoi_d = mfbi->x_aoi_d; 1017 aoi_d.x_aoi_d = mfbi->x_aoi_d;
diff --git a/drivers/video/matrox/i2c-matroxfb.c b/drivers/video/matrox/i2c-matroxfb.c
index 75ee5a12e549..c14e3e2212b3 100644
--- a/drivers/video/matrox/i2c-matroxfb.c
+++ b/drivers/video/matrox/i2c-matroxfb.c
@@ -87,13 +87,7 @@ static int matroxfb_gpio_getscl(void* data) {
87 return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0; 87 return (matroxfb_read_gpio(b->minfo) & b->mask.clock) ? 1 : 0;
88} 88}
89 89
90static struct i2c_adapter matrox_i2c_adapter_template = 90static const struct i2c_algo_bit_data matrox_i2c_algo_template =
91{
92 .owner = THIS_MODULE,
93 .id = I2C_HW_B_G400,
94};
95
96static struct i2c_algo_bit_data matrox_i2c_algo_template =
97{ 91{
98 .setsda = matroxfb_gpio_setsda, 92 .setsda = matroxfb_gpio_setsda,
99 .setscl = matroxfb_gpio_setscl, 93 .setscl = matroxfb_gpio_setscl,
@@ -112,7 +106,7 @@ static int i2c_bus_reg(struct i2c_bit_adapter* b, struct matrox_fb_info* minfo,
112 b->minfo = minfo; 106 b->minfo = minfo;
113 b->mask.data = data; 107 b->mask.data = data;
114 b->mask.clock = clock; 108 b->mask.clock = clock;
115 b->adapter = matrox_i2c_adapter_template; 109 b->adapter.owner = THIS_MODULE;
116 snprintf(b->adapter.name, sizeof(b->adapter.name), name, 110 snprintf(b->adapter.name, sizeof(b->adapter.name), name,
117 minfo->fbcon.node); 111 minfo->fbcon.node);
118 i2c_set_adapdata(&b->adapter, b); 112 i2c_set_adapdata(&b->adapter, b);
@@ -187,6 +181,17 @@ static void* i2c_matroxfb_probe(struct matrox_fb_info* minfo) {
187 MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0); 181 MAT_DATA, MAT_CLK, "MAVEN:fb%u", 0);
188 if (err) 182 if (err)
189 printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n"); 183 printk(KERN_INFO "i2c-matroxfb: Could not register Maven i2c bus. Continuing anyway.\n");
184 else {
185 struct i2c_board_info maven_info = {
186 I2C_BOARD_INFO("maven", 0x1b),
187 };
188 unsigned short const addr_list[2] = {
189 0x1b, I2C_CLIENT_END
190 };
191
192 i2c_new_probed_device(&m2info->maven.adapter,
193 &maven_info, addr_list);
194 }
190 } 195 }
191 return m2info; 196 return m2info;
192fail_ddc1:; 197fail_ddc1:;
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 89da27bd5c49..042408a8c631 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -19,8 +19,6 @@
19#include <linux/matroxfb.h> 19#include <linux/matroxfb.h>
20#include <asm/div64.h> 20#include <asm/div64.h>
21 21
22#define MAVEN_I2CID (0x1B)
23
24#define MGATVO_B 1 22#define MGATVO_B 1
25#define MGATVO_C 2 23#define MGATVO_C 2
26 24
@@ -128,7 +126,7 @@ static int get_ctrl_id(__u32 v4l2_id) {
128 126
129struct maven_data { 127struct maven_data {
130 struct matrox_fb_info* primary_head; 128 struct matrox_fb_info* primary_head;
131 struct i2c_client client; 129 struct i2c_client *client;
132 int version; 130 int version;
133}; 131};
134 132
@@ -974,7 +972,7 @@ static inline int maven_compute_timming(struct maven_data* md,
974 972
975static int maven_program_timming(struct maven_data* md, 973static int maven_program_timming(struct maven_data* md,
976 const struct mavenregs* m) { 974 const struct mavenregs* m) {
977 struct i2c_client* c = &md->client; 975 struct i2c_client *c = md->client;
978 976
979 if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) { 977 if (m->mode == MATROXFB_OUTPUT_MODE_MONITOR) {
980 LR(0x80); 978 LR(0x80);
@@ -1011,7 +1009,7 @@ static int maven_program_timming(struct maven_data* md,
1011} 1009}
1012 1010
1013static inline int maven_resync(struct maven_data* md) { 1011static inline int maven_resync(struct maven_data* md) {
1014 struct i2c_client* c = &md->client; 1012 struct i2c_client *c = md->client;
1015 maven_set_reg(c, 0x95, 0x20); /* start whole thing */ 1013 maven_set_reg(c, 0x95, 0x20); /* start whole thing */
1016 return 0; 1014 return 0;
1017} 1015}
@@ -1069,48 +1067,48 @@ static int maven_set_control (struct maven_data* md,
1069 maven_compute_bwlevel(md, &blacklevel, &whitelevel); 1067 maven_compute_bwlevel(md, &blacklevel, &whitelevel);
1070 blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8); 1068 blacklevel = (blacklevel >> 2) | ((blacklevel & 3) << 8);
1071 whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8); 1069 whitelevel = (whitelevel >> 2) | ((whitelevel & 3) << 8);
1072 maven_set_reg_pair(&md->client, 0x0e, blacklevel); 1070 maven_set_reg_pair(md->client, 0x0e, blacklevel);
1073 maven_set_reg_pair(&md->client, 0x1e, whitelevel); 1071 maven_set_reg_pair(md->client, 0x1e, whitelevel);
1074 } 1072 }
1075 break; 1073 break;
1076 case V4L2_CID_SATURATION: 1074 case V4L2_CID_SATURATION:
1077 { 1075 {
1078 maven_set_reg(&md->client, 0x20, p->value); 1076 maven_set_reg(md->client, 0x20, p->value);
1079 maven_set_reg(&md->client, 0x22, p->value); 1077 maven_set_reg(md->client, 0x22, p->value);
1080 } 1078 }
1081 break; 1079 break;
1082 case V4L2_CID_HUE: 1080 case V4L2_CID_HUE:
1083 { 1081 {
1084 maven_set_reg(&md->client, 0x25, p->value); 1082 maven_set_reg(md->client, 0x25, p->value);
1085 } 1083 }
1086 break; 1084 break;
1087 case V4L2_CID_GAMMA: 1085 case V4L2_CID_GAMMA:
1088 { 1086 {
1089 const struct maven_gamma* g; 1087 const struct maven_gamma* g;
1090 g = maven_compute_gamma(md); 1088 g = maven_compute_gamma(md);
1091 maven_set_reg(&md->client, 0x83, g->reg83); 1089 maven_set_reg(md->client, 0x83, g->reg83);
1092 maven_set_reg(&md->client, 0x84, g->reg84); 1090 maven_set_reg(md->client, 0x84, g->reg84);
1093 maven_set_reg(&md->client, 0x85, g->reg85); 1091 maven_set_reg(md->client, 0x85, g->reg85);
1094 maven_set_reg(&md->client, 0x86, g->reg86); 1092 maven_set_reg(md->client, 0x86, g->reg86);
1095 maven_set_reg(&md->client, 0x87, g->reg87); 1093 maven_set_reg(md->client, 0x87, g->reg87);
1096 maven_set_reg(&md->client, 0x88, g->reg88); 1094 maven_set_reg(md->client, 0x88, g->reg88);
1097 maven_set_reg(&md->client, 0x89, g->reg89); 1095 maven_set_reg(md->client, 0x89, g->reg89);
1098 maven_set_reg(&md->client, 0x8a, g->reg8a); 1096 maven_set_reg(md->client, 0x8a, g->reg8a);
1099 maven_set_reg(&md->client, 0x8b, g->reg8b); 1097 maven_set_reg(md->client, 0x8b, g->reg8b);
1100 } 1098 }
1101 break; 1099 break;
1102 case MATROXFB_CID_TESTOUT: 1100 case MATROXFB_CID_TESTOUT:
1103 { 1101 {
1104 unsigned char val 1102 unsigned char val
1105 = maven_get_reg(&md->client,0x8d); 1103 = maven_get_reg(md->client, 0x8d);
1106 if (p->value) val |= 0x10; 1104 if (p->value) val |= 0x10;
1107 else val &= ~0x10; 1105 else val &= ~0x10;
1108 maven_set_reg(&md->client, 0x8d, val); 1106 maven_set_reg(md->client, 0x8d, val);
1109 } 1107 }
1110 break; 1108 break;
1111 case MATROXFB_CID_DEFLICKER: 1109 case MATROXFB_CID_DEFLICKER:
1112 { 1110 {
1113 maven_set_reg(&md->client, 0x93, maven_compute_deflicker(md)); 1111 maven_set_reg(md->client, 0x93, maven_compute_deflicker(md));
1114 } 1112 }
1115 break; 1113 break;
1116 } 1114 }
@@ -1189,6 +1187,7 @@ static int maven_init_client(struct i2c_client* clnt) {
1189 MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo); 1187 MINFO_FROM(container_of(clnt->adapter, struct i2c_bit_adapter, adapter)->minfo);
1190 1188
1191 md->primary_head = MINFO; 1189 md->primary_head = MINFO;
1190 md->client = clnt;
1192 down_write(&ACCESS_FBINFO(altout.lock)); 1191 down_write(&ACCESS_FBINFO(altout.lock));
1193 ACCESS_FBINFO(outputs[1]).output = &maven_altout; 1192 ACCESS_FBINFO(outputs[1]).output = &maven_altout;
1194 ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src; 1193 ACCESS_FBINFO(outputs[1]).src = ACCESS_FBINFO(outputs[1]).default_src;
@@ -1232,14 +1231,11 @@ static int maven_shutdown_client(struct i2c_client* clnt) {
1232 return 0; 1231 return 0;
1233} 1232}
1234 1233
1235static const unsigned short normal_i2c[] = { MAVEN_I2CID, I2C_CLIENT_END }; 1234static int maven_probe(struct i2c_client *client,
1236I2C_CLIENT_INSMOD; 1235 const struct i2c_device_id *id)
1237 1236{
1238static struct i2c_driver maven_driver; 1237 struct i2c_adapter *adapter = client->adapter;
1239 1238 int err = -ENODEV;
1240static int maven_detect_client(struct i2c_adapter* adapter, int address, int kind) {
1241 int err = 0;
1242 struct i2c_client* new_client;
1243 struct maven_data* data; 1239 struct maven_data* data;
1244 1240
1245 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA | 1241 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
@@ -1250,50 +1246,37 @@ static int maven_detect_client(struct i2c_adapter* adapter, int address, int kin
1250 err = -ENOMEM; 1246 err = -ENOMEM;
1251 goto ERROR0; 1247 goto ERROR0;
1252 } 1248 }
1253 new_client = &data->client; 1249 i2c_set_clientdata(client, data);
1254 i2c_set_clientdata(new_client, data); 1250 err = maven_init_client(client);
1255 new_client->addr = address;
1256 new_client->adapter = adapter;
1257 new_client->driver = &maven_driver;
1258 new_client->flags = 0;
1259 strlcpy(new_client->name, "maven", I2C_NAME_SIZE);
1260 if ((err = i2c_attach_client(new_client)))
1261 goto ERROR3;
1262 err = maven_init_client(new_client);
1263 if (err) 1251 if (err)
1264 goto ERROR4; 1252 goto ERROR4;
1265 return 0; 1253 return 0;
1266ERROR4:; 1254ERROR4:;
1267 i2c_detach_client(new_client); 1255 kfree(data);
1268ERROR3:;
1269 kfree(new_client);
1270ERROR0:; 1256ERROR0:;
1271 return err; 1257 return err;
1272} 1258}
1273 1259
1274static int maven_attach_adapter(struct i2c_adapter* adapter) { 1260static int maven_remove(struct i2c_client *client)
1275 if (adapter->id == I2C_HW_B_G400) 1261{
1276 return i2c_probe(adapter, &addr_data, &maven_detect_client);
1277 return 0;
1278}
1279
1280static int maven_detach_client(struct i2c_client* client) {
1281 int err;
1282
1283 if ((err = i2c_detach_client(client)))
1284 return err;
1285 maven_shutdown_client(client); 1262 maven_shutdown_client(client);
1286 kfree(i2c_get_clientdata(client)); 1263 kfree(i2c_get_clientdata(client));
1287 return 0; 1264 return 0;
1288} 1265}
1289 1266
1267static const struct i2c_device_id maven_id[] = {
1268 { "maven", 0 },
1269 { }
1270};
1271MODULE_DEVICE_TABLE(i2c, maven_id);
1272
1290static struct i2c_driver maven_driver={ 1273static struct i2c_driver maven_driver={
1291 .driver = { 1274 .driver = {
1292 .name = "maven", 1275 .name = "maven",
1293 }, 1276 },
1294 .id = I2C_DRIVERID_MGATVO, 1277 .probe = maven_probe,
1295 .attach_adapter = maven_attach_adapter, 1278 .remove = maven_remove,
1296 .detach_client = maven_detach_client, 1279 .id_table = maven_id,
1297}; 1280};
1298 1281
1299static int __init matroxfb_maven_init(void) 1282static int __init matroxfb_maven_init(void)
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index e7aa7ae8fca8..97204497d9f7 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1031,7 +1031,9 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
1031 pxa_gpio_mode(GPIO74_LCD_FCLK_MD); 1031 pxa_gpio_mode(GPIO74_LCD_FCLK_MD);
1032 pxa_gpio_mode(GPIO75_LCD_LCLK_MD); 1032 pxa_gpio_mode(GPIO75_LCD_LCLK_MD);
1033 pxa_gpio_mode(GPIO76_LCD_PCLK_MD); 1033 pxa_gpio_mode(GPIO76_LCD_PCLK_MD);
1034 pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD); 1034
1035 if ((lccr0 & LCCR0_PAS) == 0)
1036 pxa_gpio_mode(GPIO77_LCD_ACBIAS_MD);
1035} 1037}
1036 1038
1037static void pxafb_enable_controller(struct pxafb_info *fbi) 1039static void pxafb_enable_controller(struct pxafb_info *fbi)
@@ -1400,6 +1402,8 @@ static void pxafb_decode_mach_info(struct pxafb_info *fbi,
1400 if (lcd_conn == LCD_MONO_STN_8BPP) 1402 if (lcd_conn == LCD_MONO_STN_8BPP)
1401 fbi->lccr0 |= LCCR0_DPD; 1403 fbi->lccr0 |= LCCR0_DPD;
1402 1404
1405 fbi->lccr0 |= (lcd_conn & LCD_ALTERNATE_MAPPING) ? LCCR0_LDDALT : 0;
1406
1403 fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); 1407 fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff);
1404 fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; 1408 fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
1405 fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; 1409 fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0;
@@ -1673,53 +1677,63 @@ MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
1673#define pxafb_setup_options() (0) 1677#define pxafb_setup_options() (0)
1674#endif 1678#endif
1675 1679
1676static int __devinit pxafb_probe(struct platform_device *dev)
1677{
1678 struct pxafb_info *fbi;
1679 struct pxafb_mach_info *inf;
1680 struct resource *r;
1681 int irq, ret;
1682
1683 dev_dbg(&dev->dev, "pxafb_probe\n");
1684
1685 inf = dev->dev.platform_data;
1686 ret = -ENOMEM;
1687 fbi = NULL;
1688 if (!inf)
1689 goto failed;
1690
1691 ret = pxafb_parse_options(&dev->dev, g_options);
1692 if (ret < 0)
1693 goto failed;
1694
1695#ifdef DEBUG_VAR 1680#ifdef DEBUG_VAR
1696 /* Check for various illegal bit-combinations. Currently only 1681/* Check for various illegal bit-combinations. Currently only
1697 * a warning is given. */ 1682 * a warning is given. */
1683static void __devinit pxafb_check_options(struct device *dev,
1684 struct pxafb_mach_info *inf)
1685{
1686 if (inf->lcd_conn)
1687 return;
1698 1688
1699 if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) 1689 if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
1700 dev_warn(&dev->dev, "machine LCCR0 setting contains " 1690 dev_warn(dev, "machine LCCR0 setting contains "
1701 "illegal bits: %08x\n", 1691 "illegal bits: %08x\n",
1702 inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); 1692 inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
1703 if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) 1693 if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
1704 dev_warn(&dev->dev, "machine LCCR3 setting contains " 1694 dev_warn(dev, "machine LCCR3 setting contains "
1705 "illegal bits: %08x\n", 1695 "illegal bits: %08x\n",
1706 inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); 1696 inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
1707 if (inf->lccr0 & LCCR0_DPD && 1697 if (inf->lccr0 & LCCR0_DPD &&
1708 ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || 1698 ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas ||
1709 (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || 1699 (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl ||
1710 (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) 1700 (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono))
1711 dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is " 1701 dev_warn(dev, "Double Pixel Data (DPD) mode is "
1712 "only valid in passive mono" 1702 "only valid in passive mono"
1713 " single panel mode\n"); 1703 " single panel mode\n");
1714 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && 1704 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
1715 (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) 1705 (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual)
1716 dev_warn(&dev->dev, "Dual panel only valid in passive mode\n"); 1706 dev_warn(dev, "Dual panel only valid in passive mode\n");
1717 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && 1707 if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
1718 (inf->modes->upper_margin || inf->modes->lower_margin)) 1708 (inf->modes->upper_margin || inf->modes->lower_margin))
1719 dev_warn(&dev->dev, "Upper and lower margins must be 0 in " 1709 dev_warn(dev, "Upper and lower margins must be 0 in "
1720 "passive mode\n"); 1710 "passive mode\n");
1711}
1712#else
1713#define pxafb_check_options(...) do {} while (0)
1721#endif 1714#endif
1722 1715
1716static int __devinit pxafb_probe(struct platform_device *dev)
1717{
1718 struct pxafb_info *fbi;
1719 struct pxafb_mach_info *inf;
1720 struct resource *r;
1721 int irq, ret;
1722
1723 dev_dbg(&dev->dev, "pxafb_probe\n");
1724
1725 inf = dev->dev.platform_data;
1726 ret = -ENOMEM;
1727 fbi = NULL;
1728 if (!inf)
1729 goto failed;
1730
1731 ret = pxafb_parse_options(&dev->dev, g_options);
1732 if (ret < 0)
1733 goto failed;
1734
1735 pxafb_check_options(&dev->dev, inf);
1736
1723 dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", 1737 dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",
1724 inf->modes->xres, 1738 inf->modes->xres,
1725 inf->modes->yres, 1739 inf->modes->yres,
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 32b9fe153641..db20542796bf 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -285,10 +285,11 @@ config ALIM1535_WDT
285 285
286config ALIM7101_WDT 286config ALIM7101_WDT
287 tristate "ALi M7101 PMU Computer Watchdog" 287 tristate "ALi M7101 PMU Computer Watchdog"
288 depends on X86 && PCI 288 depends on PCI
289 help 289 help
290 This is the driver for the hardware watchdog on the ALi M7101 PMU 290 This is the driver for the hardware watchdog on the ALi M7101 PMU
291 as used in the x86 Cobalt servers. 291 as used in the x86 Cobalt servers and also found in some
292 SPARC Netra servers too.
292 293
293 To compile this driver as a module, choose M here: the 294 To compile this driver as a module, choose M here: the
294 module will be called alim7101_wdt. 295 module will be called alim7101_wdt.
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 049c91895699..ca3dc043d786 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -66,7 +66,10 @@ obj-$(CONFIG_IB700_WDT) += ib700wdt.o
66obj-$(CONFIG_IBMASR) += ibmasr.o 66obj-$(CONFIG_IBMASR) += ibmasr.o
67obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o 67obj-$(CONFIG_WAFER_WDT) += wafer5823wdt.o
68obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o 68obj-$(CONFIG_I6300ESB_WDT) += i6300esb.o
69obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o iTCO_vendor_support.o 69obj-$(CONFIG_ITCO_WDT) += iTCO_wdt.o
70ifeq ($(CONFIG_ITCO_VENDOR_SUPPORT),y)
71obj-$(CONFIG_ITCO_WDT) += iTCO_vendor_support.o
72endif
70obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o 73obj-$(CONFIG_IT8712F_WDT) += it8712f_wdt.o
71obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o 74obj-$(CONFIG_HP_WATCHDOG) += hpwdt.o
72obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o 75obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index bacd867dd22e..d061f0ad2d20 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -128,7 +128,7 @@ static struct watchdog_info at91_wdt_info = {
128/* 128/*
129 * Handle commands from user-space. 129 * Handle commands from user-space.
130 */ 130 */
131static long at91_wdt_ioct(struct file *file, 131static long at91_wdt_ioctl(struct file *file,
132 unsigned int cmd, unsigned long arg) 132 unsigned int cmd, unsigned long arg)
133{ 133{
134 void __user *argp = (void __user *)arg; 134 void __user *argp = (void __user *)arg;
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 41264a5f1731..8302ef005be7 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -29,7 +29,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
29static int heartbeat = 60; /* (secs) Default is 1 minute */ 29static int heartbeat = 60; /* (secs) Default is 1 minute */
30static unsigned long wdt_status; 30static unsigned long wdt_status;
31static unsigned long boot_status; 31static unsigned long boot_status;
32static spin_lock_t wdt_lock; 32static DEFINE_SPINLOCK(wdt_lock);
33 33
34#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL) 34#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL)
35 35
diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c
index 3b0ddc7fcf3f..9e1331a3b215 100644
--- a/drivers/watchdog/pcwd.c
+++ b/drivers/watchdog/pcwd.c
@@ -168,7 +168,7 @@ static const int heartbeat_tbl[] = {
168static int cards_found; 168static int cards_found;
169 169
170/* internal variables */ 170/* internal variables */
171static atomic_t open_allowed = ATOMIC_INIT(1); 171static unsigned long open_allowed;
172static char expect_close; 172static char expect_close;
173static int temp_panic; 173static int temp_panic;
174 174
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 3da2b90d2fe6..22715e3be5e7 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -157,8 +157,6 @@ static void s3c2410wdt_start(void)
157 writel(wdt_count, wdt_base + S3C2410_WTCNT); 157 writel(wdt_count, wdt_base + S3C2410_WTCNT);
158 writel(wtcon, wdt_base + S3C2410_WTCON); 158 writel(wtcon, wdt_base + S3C2410_WTCON);
159 spin_unlock(&wdt_lock); 159 spin_unlock(&wdt_lock);
160
161 return 0;
162} 160}
163 161
164static int s3c2410wdt_set_heartbeat(int timeout) 162static int s3c2410wdt_set_heartbeat(int timeout)
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 824125adf90a..cdc7138be301 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -30,7 +30,7 @@
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/uaccess.h> 32#include <linux/uaccess.h>
33#include <linux/watchdog.h> 33#include <asm/watchdog.h>
34 34
35#define PFX "shwdt: " 35#define PFX "shwdt: "
36 36
@@ -68,7 +68,7 @@ static int clock_division_ratio = WTCSR_CKS_4096;
68static void sh_wdt_ping(unsigned long data); 68static void sh_wdt_ping(unsigned long data);
69 69
70static unsigned long shwdt_is_open; 70static unsigned long shwdt_is_open;
71static struct watchdog_info sh_wdt_info; 71static const struct watchdog_info sh_wdt_info;
72static char shwdt_expect_close; 72static char shwdt_expect_close;
73static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0); 73static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0);
74static unsigned long next_heartbeat; 74static unsigned long next_heartbeat;
@@ -89,7 +89,7 @@ static void sh_wdt_start(void)
89 __u8 csr; 89 __u8 csr;
90 unsigned long flags; 90 unsigned long flags;
91 91
92 spin_lock_irqsave(&wdt_lock, flags); 92 spin_lock_irqsave(&shwdt_lock, flags);
93 93
94 next_heartbeat = jiffies + (heartbeat * HZ); 94 next_heartbeat = jiffies + (heartbeat * HZ);
95 mod_timer(&timer, next_ping_period(clock_division_ratio)); 95 mod_timer(&timer, next_ping_period(clock_division_ratio));
@@ -127,7 +127,7 @@ static void sh_wdt_start(void)
127 csr &= ~RSTCSR_RSTS; 127 csr &= ~RSTCSR_RSTS;
128 sh_wdt_write_rstcsr(csr); 128 sh_wdt_write_rstcsr(csr);
129#endif 129#endif
130 spin_unlock_irqrestore(&wdt_lock, flags); 130 spin_unlock_irqrestore(&shwdt_lock, flags);
131} 131}
132 132
133/** 133/**
@@ -139,14 +139,14 @@ static void sh_wdt_stop(void)
139 __u8 csr; 139 __u8 csr;
140 unsigned long flags; 140 unsigned long flags;
141 141
142 spin_lock_irqsave(&wdt_lock, flags); 142 spin_lock_irqsave(&shwdt_lock, flags);
143 143
144 del_timer(&timer); 144 del_timer(&timer);
145 145
146 csr = sh_wdt_read_csr(); 146 csr = sh_wdt_read_csr();
147 csr &= ~WTCSR_TME; 147 csr &= ~WTCSR_TME;
148 sh_wdt_write_csr(csr); 148 sh_wdt_write_csr(csr);
149 spin_unlock_irqrestore(&wdt_lock, flags); 149 spin_unlock_irqrestore(&shwdt_lock, flags);
150} 150}
151 151
152/** 152/**
@@ -157,9 +157,9 @@ static inline void sh_wdt_keepalive(void)
157{ 157{
158 unsigned long flags; 158 unsigned long flags;
159 159
160 spin_lock_irqsave(&wdt_lock, flags); 160 spin_lock_irqsave(&shwdt_lock, flags);
161 next_heartbeat = jiffies + (heartbeat * HZ); 161 next_heartbeat = jiffies + (heartbeat * HZ);
162 spin_unlock_irqrestore(&wdt_lock, flags); 162 spin_unlock_irqrestore(&shwdt_lock, flags);
163} 163}
164 164
165/** 165/**
@@ -173,9 +173,9 @@ static int sh_wdt_set_heartbeat(int t)
173 if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */ 173 if (unlikely(t < 1 || t > 3600)) /* arbitrary upper limit */
174 return -EINVAL; 174 return -EINVAL;
175 175
176 spin_lock_irqsave(&wdt_lock, flags); 176 spin_lock_irqsave(&shwdt_lock, flags);
177 heartbeat = t; 177 heartbeat = t;
178 spin_unlock_irqrestore(&wdt_lock, flags); 178 spin_unlock_irqrestore(&shwdt_lock, flags);
179 return 0; 179 return 0;
180} 180}
181 181
@@ -189,7 +189,7 @@ static void sh_wdt_ping(unsigned long data)
189{ 189{
190 unsigned long flags; 190 unsigned long flags;
191 191
192 spin_lock_irqsave(&wdt_lock, flags); 192 spin_lock_irqsave(&shwdt_lock, flags);
193 if (time_before(jiffies, next_heartbeat)) { 193 if (time_before(jiffies, next_heartbeat)) {
194 __u8 csr; 194 __u8 csr;
195 195
@@ -203,7 +203,7 @@ static void sh_wdt_ping(unsigned long data)
203 } else 203 } else
204 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping " 204 printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
205 "the watchdog\n"); 205 "the watchdog\n");
206 spin_unlock_irqrestore(&wdt_lock, flags); 206 spin_unlock_irqrestore(&shwdt_lock, flags);
207} 207}
208 208
209/** 209/**
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index dbbc018a5f46..6adab77fbbb0 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -45,7 +45,7 @@ static unsigned long txx9wdt_alive;
45static int expect_close; 45static int expect_close;
46static struct txx9_tmr_reg __iomem *txx9wdt_reg; 46static struct txx9_tmr_reg __iomem *txx9wdt_reg;
47static struct clk *txx9_imclk; 47static struct clk *txx9_imclk;
48static DECLARE_LOCK(txx9_lock); 48static DEFINE_SPINLOCK(txx9_lock);
49 49
50static void txx9wdt_ping(void) 50static void txx9wdt_ping(void)
51{ 51{
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index c8d7f1b2df02..db362c34958b 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -208,7 +208,7 @@ static int __init footbridge_watchdog_init(void)
208 soft_margin); 208 soft_margin);
209 209
210 if (machine_is_cats()) 210 if (machine_is_cats())
211 printk(KERN_WARN 211 printk(KERN_WARNING
212 "Warning: Watchdog reset may not work on this machine.\n"); 212 "Warning: Watchdog reset may not work on this machine.\n");
213 return 0; 213 return 0;
214} 214}