aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/ec.c73
-rw-r--r--drivers/acpi/sleep.c (renamed from drivers/acpi/main.c)0
-rw-r--r--drivers/acpi/thermal.c16
-rw-r--r--drivers/ata/Kconfig9
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/libata-core.c73
-rw-r--r--drivers/ata/libata-scsi.c17
-rw-r--r--drivers/ata/pata_ali.c28
-rw-r--r--drivers/ata/pata_atiixp.c32
-rw-r--r--drivers/ata/pata_octeon_cf.c965
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_via.c2
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/block/nbd.c4
-rw-r--r--drivers/block/ps3disk.c18
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/ps3flash.c18
-rw-r--r--drivers/char/synclink_gt.c16
-rw-r--r--drivers/char/sysrq.c6
-rw-r--r--drivers/char/tty_ioctl.c2
-rw-r--r--drivers/dma/Kconfig19
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dmaengine.c8
-rw-r--r--drivers/dma/dmatest.c35
-rw-r--r--drivers/dma/fsldma.c8
-rw-r--r--drivers/dma/ipu/Makefile1
-rw-r--r--drivers/dma/ipu/ipu_idmac.c1740
-rw-r--r--drivers/dma/ipu/ipu_intern.h176
-rw-r--r--drivers/dma/ipu/ipu_irq.c413
-rw-r--r--drivers/firmware/dell_rbu.c4
-rw-r--r--drivers/gpio/max7301.c6
-rw-r--r--drivers/gpio/max732x.c6
-rw-r--r--drivers/gpio/mcp23s08.c6
-rw-r--r--drivers/gpio/pca953x.c6
-rw-r--r--drivers/gpio/pcf857x.c12
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c14
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c175
-rw-r--r--drivers/gpu/drm/drm_drv.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c18
-rw-r--r--drivers/gpu/drm/drm_stub.c8
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c189
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c43
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c20
-rw-r--r--drivers/hwmon/Kconfig24
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/abituguru3.c24
-rw-r--r--drivers/hwmon/adt7475.c1221
-rw-r--r--drivers/hwmon/applesmc.c5
-rw-r--r--drivers/hwmon/hp_accel.c75
-rw-r--r--drivers/hwmon/k8temp.c55
-rw-r--r--drivers/i2c/busses/i2c-acorn.c1
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c1
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c1
-rw-r--r--drivers/i2c/busses/i2c-amd756.c1
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c1
-rw-r--r--drivers/i2c/busses/i2c-au1550.c1
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c1
-rw-r--r--drivers/i2c/busses/i2c-elektor.c1
-rw-r--r--drivers/i2c/busses/i2c-hydra.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c1
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c1
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c1
-rw-r--r--drivers/i2c/busses/i2c-mpc.c1
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c1
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c1
-rw-r--r--drivers/i2c/busses/i2c-parport.c1
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c1
-rw-r--r--drivers/i2c/busses/i2c-piix4.c1
-rw-r--r--drivers/i2c/busses/i2c-sibyte.c2
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c1
-rw-r--r--drivers/i2c/busses/i2c-sis630.c1
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c1
-rw-r--r--drivers/i2c/busses/i2c-via.c1
-rw-r--r--drivers/i2c/busses/i2c-viapro.c1
-rw-r--r--drivers/i2c/busses/i2c-voodoo3.c2
-rw-r--r--drivers/i2c/busses/scx200_acb.c1
-rw-r--r--drivers/i2c/busses/scx200_i2c.c1
-rw-r--r--drivers/i2c/chips/Kconfig37
-rw-r--r--drivers/i2c/chips/Makefile2
-rw-r--r--drivers/ide/falconide.c2
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/palm_bk3710.c11
-rw-r--r--drivers/ieee1394/pcilynx.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_cq.c16
-rw-r--r--drivers/infiniband/hw/ehca/ehca_hca.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c18
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mcast.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c144
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c32
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_tools.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c2
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c56
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c28
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c11
-rw-r--r--drivers/isdn/i4l/isdn_net.c9
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c2
-rw-r--r--drivers/leds/Kconfig7
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-hp-disk.c137
-rw-r--r--drivers/message/fusion/lsi/mpi.h7
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h47
-rw-r--r--drivers/message/fusion/lsi/mpi_fc.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_history.txt86
-rw-r--r--drivers/message/fusion/lsi/mpi_init.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h22
-rw-r--r--drivers/message/fusion/lsi/mpi_lan.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_log_fc.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_log_sas.h37
-rw-r--r--drivers/message/fusion/lsi/mpi_raid.h11
-rw-r--r--drivers/message/fusion/lsi/mpi_sas.h18
-rw-r--r--drivers/message/fusion/lsi/mpi_targ.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_tool.h2
-rw-r--r--drivers/message/fusion/lsi/mpi_type.h4
-rw-r--r--drivers/message/fusion/mptbase.c87
-rw-r--r--drivers/message/fusion/mptbase.h3
-rw-r--r--drivers/message/fusion/mptscsih.c3
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/eeprom/Kconfig59
-rw-r--r--drivers/misc/eeprom/Makefile4
-rw-r--r--drivers/misc/eeprom/at24.c (renamed from drivers/i2c/chips/at24.c)0
-rw-r--r--drivers/misc/eeprom/at25.c (renamed from drivers/spi/at25.c)0
-rw-r--r--drivers/misc/eeprom/eeprom.c (renamed from drivers/i2c/chips/eeprom.c)0
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c (renamed from drivers/misc/eeprom_93cx6.c)0
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c2
-rw-r--r--drivers/net/arm/etherh.c2
-rw-r--r--drivers/net/ax88796.c27
-rw-r--r--drivers/net/b44.c4
-rw-r--r--drivers/net/b44.h2
-rw-r--r--drivers/net/bnx2x.h30
-rw-r--r--drivers/net/bnx2x_link.c64
-rw-r--r--drivers/net/bnx2x_main.c480
-rw-r--r--drivers/net/bnx2x_reg.h2
-rw-r--r--drivers/net/cxgb3/sge.c1
-rw-r--r--drivers/net/e1000e/82571.c6
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/gianfar.c14
-rw-r--r--drivers/net/ibm_newemac/mal.c4
-rw-r--r--drivers/net/ibm_newemac/mal.h2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c62
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/korina.c173
-rw-r--r--drivers/net/macb.c8
-rw-r--r--drivers/net/mlx4/profile.c6
-rw-r--r--drivers/net/mv643xx_eth.c17
-rw-r--r--drivers/net/myri10ge/myri10ge.c15
-rw-r--r--drivers/net/netxen/netxen_nic.h158
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c50
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c36
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c97
-rw-r--r--drivers/net/netxen/netxen_nic_init.c105
-rw-r--r--drivers/net/netxen/netxen_nic_main.c233
-rw-r--r--drivers/net/phy/mdio_bus.c8
-rw-r--r--drivers/net/phy/phy_device.c9
-rw-r--r--drivers/net/phy/smsc.c12
-rw-r--r--drivers/net/ppp_generic.c43
-rw-r--r--drivers/net/sis900.c8
-rw-r--r--drivers/net/tg3.c81
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/mcs7830.c20
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/wan/ixp4xx_hss.c6
-rw-r--r--drivers/net/wimax/i2400m/control.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c9
-rw-r--r--drivers/net/wireless/Kconfig2
-rw-r--r--drivers/net/wireless/ath5k/base.c8
-rw-r--r--drivers/net/wireless/ath5k/pcu.c4
-rw-r--r--drivers/net/wireless/ath5k/reg.h2
-rw-r--r--drivers/net/wireless/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath9k/rc.c2
-rw-r--r--drivers/net/wireless/ath9k/regd_common.h2
-rw-r--r--drivers/net/wireless/ath9k/xmit.c48
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c4
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h91
-rw-r--r--drivers/net/wireless/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco.c60
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c1
-rw-r--r--drivers/net/wireless/p54/p54common.c60
-rw-r--r--drivers/net/wireless/p54/p54usb.c43
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00rfkill.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/oprofile/cpu_buffer.c5
-rw-r--r--drivers/oprofile/cpu_buffer.h7
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c2
-rw-r--r--drivers/pci/hotplug/pciehp_core.c4
-rw-r--r--drivers/pci/msi.c16
-rw-r--r--drivers/pci/pci-driver.c91
-rw-r--r--drivers/pci/pci.c71
-rw-r--r--drivers/pci/pci.h6
-rw-r--r--drivers/platform/x86/Kconfig23
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/dell-laptop.c (renamed from drivers/misc/dell-laptop.c)2
-rw-r--r--drivers/platform/x86/eeepc-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c910
-rw-r--r--drivers/power/pda_power.c2
-rw-r--r--drivers/ps3/ps3-lpm.c16
-rw-r--r--drivers/ps3/ps3-vuart.c32
-rw-r--r--drivers/ps3/ps3stor_lib.c14
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/rtc/rtc-pxa.c2
-rw-r--r--drivers/rtc/rtc-twl4030.c49
-rw-r--r--drivers/s390/net/lcs.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c14
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h6
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c3
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/ps3rom.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c58
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c7
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/serial/8250_pci.c8
-rw-r--r--drivers/serial/8250_pnp.c2
-rw-r--r--drivers/serial/atmel_serial.c2
-rw-r--r--drivers/serial/of_serial.c1
-rw-r--r--drivers/serial/pnx8xxx_uart.c23
-rw-r--r--drivers/serial/sh-sci.h5
-rw-r--r--drivers/spi/Kconfig11
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel_spi.c3
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/video/Kconfig12
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/aty/radeon_i2c.c1
-rw-r--r--drivers/video/bf54x-lq043fb.c15
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c15
-rw-r--r--drivers/video/i810/i810-i2c.c1
-rw-r--r--drivers/video/intelfb/intelfb_i2c.c1
-rw-r--r--drivers/video/mx3fb.c1555
-rw-r--r--drivers/video/nvidia/nv_i2c.c1
-rw-r--r--drivers/video/ps3fb.c4
-rw-r--r--drivers/video/savage/savagefb-i2c.c1
-rw-r--r--drivers/xen/balloon.c8
-rw-r--r--drivers/xen/xenfs/xenbus.c11
275 files changed, 9917 insertions, 2080 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index d80f4cc2e0da..65d90c720b5a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += osl.o utils.o reboot.o\
19 19
20# sleep related files 20# sleep related files
21obj-y += wakeup.o 21obj-y += wakeup.o
22obj-y += main.o 22obj-y += sleep.o
23obj-$(CONFIG_ACPI_SLEEP) += proc.o 23obj-$(CONFIG_ACPI_SLEEP) += proc.o
24 24
25 25
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 8dfcbb8aff73..a2b82c90a683 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -120,31 +120,6 @@ static struct acpi_ec {
120 spinlock_t curr_lock; 120 spinlock_t curr_lock;
121} *boot_ec, *first_ec; 121} *boot_ec, *first_ec;
122 122
123/*
124 * Some Asus system have exchanged ECDT data/command IO addresses.
125 */
126static int print_ecdt_error(const struct dmi_system_id *id)
127{
128 printk(KERN_NOTICE PREFIX "%s detected - "
129 "ECDT has exchanged control/data I/O address\n",
130 id->ident);
131 return 0;
132}
133
134static struct dmi_system_id __cpuinitdata ec_dmi_table[] = {
135 {
136 print_ecdt_error, "Asus L4R", {
137 DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
138 DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
139 DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
140 {
141 print_ecdt_error, "Asus M6R", {
142 DMI_MATCH(DMI_BIOS_VERSION, "0207"),
143 DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
144 DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
145 {},
146};
147
148/* -------------------------------------------------------------------------- 123/* --------------------------------------------------------------------------
149 Transaction Management 124 Transaction Management
150 -------------------------------------------------------------------------- */ 125 -------------------------------------------------------------------------- */
@@ -983,8 +958,8 @@ static const struct acpi_device_id ec_device_ids[] = {
983int __init acpi_ec_ecdt_probe(void) 958int __init acpi_ec_ecdt_probe(void)
984{ 959{
985 acpi_status status; 960 acpi_status status;
961 struct acpi_ec *saved_ec = NULL;
986 struct acpi_table_ecdt *ecdt_ptr; 962 struct acpi_table_ecdt *ecdt_ptr;
987 acpi_handle dummy;
988 963
989 boot_ec = make_acpi_ec(); 964 boot_ec = make_acpi_ec();
990 if (!boot_ec) 965 if (!boot_ec)
@@ -998,21 +973,16 @@ int __init acpi_ec_ecdt_probe(void)
998 pr_info(PREFIX "EC description table is found, configuring boot EC\n"); 973 pr_info(PREFIX "EC description table is found, configuring boot EC\n");
999 boot_ec->command_addr = ecdt_ptr->control.address; 974 boot_ec->command_addr = ecdt_ptr->control.address;
1000 boot_ec->data_addr = ecdt_ptr->data.address; 975 boot_ec->data_addr = ecdt_ptr->data.address;
1001 if (dmi_check_system(ec_dmi_table)) {
1002 /*
1003 * If the board falls into ec_dmi_table, it means
1004 * that ECDT table gives the incorrect command/status
1005 * & data I/O address. Just fix it.
1006 */
1007 boot_ec->data_addr = ecdt_ptr->control.address;
1008 boot_ec->command_addr = ecdt_ptr->data.address;
1009 }
1010 boot_ec->gpe = ecdt_ptr->gpe; 976 boot_ec->gpe = ecdt_ptr->gpe;
1011 boot_ec->handle = ACPI_ROOT_OBJECT; 977 boot_ec->handle = ACPI_ROOT_OBJECT;
1012 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 978 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
1013 /* Add some basic check against completely broken table */ 979 /* Don't trust ECDT, which comes from ASUSTek */
1014 if (boot_ec->data_addr != boot_ec->command_addr) 980 if (!dmi_name_in_vendors("ASUS"))
1015 goto install; 981 goto install;
982 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
983 if (!saved_ec)
984 return -ENOMEM;
985 memcpy(&saved_ec, boot_ec, sizeof(saved_ec));
1016 /* fall through */ 986 /* fall through */
1017 } 987 }
1018 /* This workaround is needed only on some broken machines, 988 /* This workaround is needed only on some broken machines,
@@ -1023,12 +993,29 @@ int __init acpi_ec_ecdt_probe(void)
1023 /* Check that acpi_get_devices actually find something */ 993 /* Check that acpi_get_devices actually find something */
1024 if (ACPI_FAILURE(status) || !boot_ec->handle) 994 if (ACPI_FAILURE(status) || !boot_ec->handle)
1025 goto error; 995 goto error;
1026 /* We really need to limit this workaround, the only ASUS, 996 if (saved_ec) {
1027 * which needs it, has fake EC._INI method, so use it as flag. 997 /* try to find good ECDT from ASUSTek */
1028 * Keep boot_ec struct as it will be needed soon. 998 if (saved_ec->command_addr != boot_ec->command_addr ||
1029 */ 999 saved_ec->data_addr != boot_ec->data_addr ||
1030 if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy))) 1000 saved_ec->gpe != boot_ec->gpe ||
1031 return -ENODEV; 1001 saved_ec->handle != boot_ec->handle)
1002 pr_info(PREFIX "ASUSTek keeps feeding us with broken "
1003 "ECDT tables, which are very hard to workaround. "
1004 "Trying to use DSDT EC info instead. Please send "
1005 "output of acpidump to linux-acpi@vger.kernel.org\n");
1006 kfree(saved_ec);
1007 saved_ec = NULL;
1008 } else {
1009 /* We really need to limit this workaround, the only ASUS,
1010 * which needs it, has fake EC._INI method, so use it as flag.
1011 * Keep boot_ec struct as it will be needed soon.
1012 */
1013 acpi_handle dummy;
1014 if (!dmi_name_in_vendors("ASUS") ||
1015 ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI",
1016 &dummy)))
1017 return -ENODEV;
1018 }
1032install: 1019install:
1033 if (!ec_install_handlers(boot_ec)) { 1020 if (!ec_install_handlers(boot_ec)) {
1034 first_ec = boot_ec; 1021 first_ec = boot_ec;
diff --git a/drivers/acpi/main.c b/drivers/acpi/sleep.c
index 7e3c609cbef2..7e3c609cbef2 100644
--- a/drivers/acpi/main.c
+++ b/drivers/acpi/sleep.c
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 073ff09218a9..99e6f1f8ea45 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -416,7 +416,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
416 } 416 }
417 417
418 /* Passive (optional) */ 418 /* Passive (optional) */
419 if (flag & ACPI_TRIPS_PASSIVE) { 419 if (((flag & ACPI_TRIPS_PASSIVE) && tz->trips.passive.flags.valid) ||
420 (flag == ACPI_TRIPS_INIT)) {
420 valid = tz->trips.passive.flags.valid; 421 valid = tz->trips.passive.flags.valid;
421 if (psv == -1) { 422 if (psv == -1) {
422 status = AE_SUPPORT; 423 status = AE_SUPPORT;
@@ -462,8 +463,11 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
462 memset(&devices, 0, sizeof(struct acpi_handle_list)); 463 memset(&devices, 0, sizeof(struct acpi_handle_list));
463 status = acpi_evaluate_reference(tz->device->handle, "_PSL", 464 status = acpi_evaluate_reference(tz->device->handle, "_PSL",
464 NULL, &devices); 465 NULL, &devices);
465 if (ACPI_FAILURE(status)) 466 if (ACPI_FAILURE(status)) {
467 printk(KERN_WARNING PREFIX
468 "Invalid passive threshold\n");
466 tz->trips.passive.flags.valid = 0; 469 tz->trips.passive.flags.valid = 0;
470 }
467 else 471 else
468 tz->trips.passive.flags.valid = 1; 472 tz->trips.passive.flags.valid = 1;
469 473
@@ -487,7 +491,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
487 if (act == -1) 491 if (act == -1)
488 break; /* disable all active trip points */ 492 break; /* disable all active trip points */
489 493
490 if (flag & ACPI_TRIPS_ACTIVE) { 494 if ((flag == ACPI_TRIPS_INIT) || ((flag & ACPI_TRIPS_ACTIVE) &&
495 tz->trips.active[i].flags.valid)) {
491 status = acpi_evaluate_integer(tz->device->handle, 496 status = acpi_evaluate_integer(tz->device->handle,
492 name, NULL, &tmp); 497 name, NULL, &tmp);
493 if (ACPI_FAILURE(status)) { 498 if (ACPI_FAILURE(status)) {
@@ -521,8 +526,11 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
521 memset(&devices, 0, sizeof(struct acpi_handle_list)); 526 memset(&devices, 0, sizeof(struct acpi_handle_list));
522 status = acpi_evaluate_reference(tz->device->handle, 527 status = acpi_evaluate_reference(tz->device->handle,
523 name, NULL, &devices); 528 name, NULL, &devices);
524 if (ACPI_FAILURE(status)) 529 if (ACPI_FAILURE(status)) {
530 printk(KERN_WARNING PREFIX
531 "Invalid active%d threshold\n", i);
525 tz->trips.active[i].flags.valid = 0; 532 tz->trips.active[i].flags.valid = 0;
533 }
526 else 534 else
527 tz->trips.active[i].flags.valid = 1; 535 tz->trips.active[i].flags.valid = 1;
528 536
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 1a7be96d627b..503a908afc80 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -698,6 +698,15 @@ config PATA_IXP4XX_CF
698 698
699 If unsure, say N. 699 If unsure, say N.
700 700
701config PATA_OCTEON_CF
702 tristate "OCTEON Boot Bus Compact Flash support"
703 depends on CPU_CAVIUM_OCTEON
704 help
705 This option enables a polled compact flash driver for use with
706 compact flash cards attached to the OCTEON boot bus.
707
708 If unsure, say N.
709
701config PATA_SCC 710config PATA_SCC
702 tristate "Toshiba's Cell Reference Set IDE support" 711 tristate "Toshiba's Cell Reference Set IDE support"
703 depends on PCI && PPC_CELLEB 712 depends on PCI && PPC_CELLEB
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 674965fa326d..7f1ecf99528c 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -69,6 +69,7 @@ obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o
69obj-$(CONFIG_PATA_SCC) += pata_scc.o 69obj-$(CONFIG_PATA_SCC) += pata_scc.o
70obj-$(CONFIG_PATA_SCH) += pata_sch.o 70obj-$(CONFIG_PATA_SCH) += pata_sch.o
71obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o 71obj-$(CONFIG_PATA_BF54X) += pata_bf54x.o
72obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
72obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o 73obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
73obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o 74obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
74obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o 75obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 71218d76d75e..88c242856dae 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3029,33 +3029,33 @@ int sata_set_spd(struct ata_link *link)
3029 */ 3029 */
3030 3030
3031static const struct ata_timing ata_timing[] = { 3031static const struct ata_timing ata_timing[] = {
3032/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ 3032/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
3033 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, 3033 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
3034 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, 3034 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
3035 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, 3035 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
3036 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 3036 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
3037 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 3037 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
3038 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, 3038 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
3039 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, 3039 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },
3040 3040
3041 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 3041 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
3042 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 3042 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
3043 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 3043 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },
3044 3044
3045 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 3045 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
3046 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 3046 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
3047 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 3047 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
3048 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, 3048 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
3049 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, 3049 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },
3050 3050
3051/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3051/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3052 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3052 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
3053 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 3053 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
3054 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 3054 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
3055 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 3055 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
3056 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 3056 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
3057 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 3057 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
3058 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 3058 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
3059 3059
3060 { 0xFF } 3060 { 0xFF }
3061}; 3061};
@@ -3065,14 +3065,15 @@ static const struct ata_timing ata_timing[] = {
3065 3065
3066static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3066static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3067{ 3067{
3068 q->setup = EZ(t->setup * 1000, T); 3068 q->setup = EZ(t->setup * 1000, T);
3069 q->act8b = EZ(t->act8b * 1000, T); 3069 q->act8b = EZ(t->act8b * 1000, T);
3070 q->rec8b = EZ(t->rec8b * 1000, T); 3070 q->rec8b = EZ(t->rec8b * 1000, T);
3071 q->cyc8b = EZ(t->cyc8b * 1000, T); 3071 q->cyc8b = EZ(t->cyc8b * 1000, T);
3072 q->active = EZ(t->active * 1000, T); 3072 q->active = EZ(t->active * 1000, T);
3073 q->recover = EZ(t->recover * 1000, T); 3073 q->recover = EZ(t->recover * 1000, T);
3074 q->cycle = EZ(t->cycle * 1000, T); 3074 q->dmack_hold = EZ(t->dmack_hold * 1000, T);
3075 q->udma = EZ(t->udma * 1000, UT); 3075 q->cycle = EZ(t->cycle * 1000, T);
3076 q->udma = EZ(t->udma * 1000, UT);
3076} 3077}
3077 3078
3078void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3079void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
@@ -3084,6 +3085,7 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3084 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3085 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3085 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3086 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3086 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3087 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3088 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold);
3087 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3089 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3088 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3090 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3089} 3091}
@@ -6638,7 +6640,6 @@ EXPORT_SYMBOL_GPL(ata_dev_pair);
6638EXPORT_SYMBOL_GPL(ata_port_disable); 6640EXPORT_SYMBOL_GPL(ata_port_disable);
6639EXPORT_SYMBOL_GPL(ata_ratelimit); 6641EXPORT_SYMBOL_GPL(ata_ratelimit);
6640EXPORT_SYMBOL_GPL(ata_wait_register); 6642EXPORT_SYMBOL_GPL(ata_wait_register);
6641EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6642EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6643EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6643EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6644EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6644EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6645EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 9e92107691f2..a1a6e6298c33 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -423,9 +423,9 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
423 * RETURNS: 423 * RETURNS:
424 * Zero on success, negative errno on error. 424 * Zero on success, negative errno on error.
425 */ 425 */
426static int ata_get_identity(struct scsi_device *sdev, void __user *arg) 426static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
427 void __user *arg)
427{ 428{
428 struct ata_port *ap = ata_shost_to_port(sdev->host);
429 struct ata_device *dev = ata_scsi_find_dev(ap, sdev); 429 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
430 u16 __user *dst = arg; 430 u16 __user *dst = arg;
431 char buf[40]; 431 char buf[40];
@@ -645,7 +645,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
645 return rc; 645 return rc;
646} 646}
647 647
648int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) 648int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
649 int cmd, void __user *arg)
649{ 650{
650 int val = -EINVAL, rc = -EINVAL; 651 int val = -EINVAL, rc = -EINVAL;
651 652
@@ -663,7 +664,7 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
663 return 0; 664 return 0;
664 665
665 case HDIO_GET_IDENTITY: 666 case HDIO_GET_IDENTITY:
666 return ata_get_identity(scsidev, arg); 667 return ata_get_identity(ap, scsidev, arg);
667 668
668 case HDIO_DRIVE_CMD: 669 case HDIO_DRIVE_CMD:
669 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 670 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -682,6 +683,14 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
682 683
683 return rc; 684 return rc;
684} 685}
686EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
687
688int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
689{
690 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
691 scsidev, cmd, arg);
692}
693EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
685 694
686/** 695/**
687 * ata_scsi_qc_new - acquire new ata_queued_cmd reference 696 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index a7999c19f0c9..eb99dbe78081 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -41,7 +41,7 @@ static int ali_atapi_dma = 0;
41module_param_named(atapi_dma, ali_atapi_dma, int, 0644); 41module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
42MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); 42MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
43 43
44static struct pci_dev *isa_bridge; 44static struct pci_dev *ali_isa_bridge;
45 45
46/* 46/*
47 * Cable special cases 47 * Cable special cases
@@ -346,13 +346,13 @@ static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes)
346 int port_bit = 4 << link->ap->port_no; 346 int port_bit = 4 << link->ap->port_no;
347 347
348 /* If our bridge is an ALI 1533 then do the extra work */ 348 /* If our bridge is an ALI 1533 then do the extra work */
349 if (isa_bridge) { 349 if (ali_isa_bridge) {
350 /* Tristate and re-enable the bus signals */ 350 /* Tristate and re-enable the bus signals */
351 pci_read_config_byte(isa_bridge, 0x58, &r); 351 pci_read_config_byte(ali_isa_bridge, 0x58, &r);
352 r &= ~port_bit; 352 r &= ~port_bit;
353 pci_write_config_byte(isa_bridge, 0x58, r); 353 pci_write_config_byte(ali_isa_bridge, 0x58, r);
354 r |= port_bit; 354 r |= port_bit;
355 pci_write_config_byte(isa_bridge, 0x58, r); 355 pci_write_config_byte(ali_isa_bridge, 0x58, r);
356 } 356 }
357 ata_sff_postreset(link, classes); 357 ata_sff_postreset(link, classes);
358} 358}
@@ -467,14 +467,14 @@ static void ali_init_chipset(struct pci_dev *pdev)
467 pci_write_config_byte(pdev, 0x53, tmp); 467 pci_write_config_byte(pdev, 0x53, tmp);
468 } 468 }
469 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); 469 north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
470 if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) { 470 if (north && north->vendor == PCI_VENDOR_ID_AL && ali_isa_bridge) {
471 /* Configure the ALi bridge logic. For non ALi rely on BIOS. 471 /* Configure the ALi bridge logic. For non ALi rely on BIOS.
472 Set the south bridge enable bit */ 472 Set the south bridge enable bit */
473 pci_read_config_byte(isa_bridge, 0x79, &tmp); 473 pci_read_config_byte(ali_isa_bridge, 0x79, &tmp);
474 if (pdev->revision == 0xC2) 474 if (pdev->revision == 0xC2)
475 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04); 475 pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x04);
476 else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) 476 else if (pdev->revision > 0xC2 && pdev->revision < 0xC5)
477 pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02); 477 pci_write_config_byte(ali_isa_bridge, 0x79, tmp | 0x02);
478 } 478 }
479 pci_dev_put(north); 479 pci_dev_put(north);
480 ata_pci_bmdma_clear_simplex(pdev); 480 ata_pci_bmdma_clear_simplex(pdev);
@@ -571,9 +571,9 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
571 571
572 ali_init_chipset(pdev); 572 ali_init_chipset(pdev);
573 573
574 if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { 574 if (ali_isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) {
575 /* Are we paired with a UDMA capable chip */ 575 /* Are we paired with a UDMA capable chip */
576 pci_read_config_byte(isa_bridge, 0x5E, &tmp); 576 pci_read_config_byte(ali_isa_bridge, 0x5E, &tmp);
577 if ((tmp & 0x1E) == 0x12) 577 if ((tmp & 0x1E) == 0x12)
578 ppi[0] = &info_20_udma; 578 ppi[0] = &info_20_udma;
579 } 579 }
@@ -617,11 +617,11 @@ static struct pci_driver ali_pci_driver = {
617static int __init ali_init(void) 617static int __init ali_init(void)
618{ 618{
619 int ret; 619 int ret;
620 isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 620 ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
621 621
622 ret = pci_register_driver(&ali_pci_driver); 622 ret = pci_register_driver(&ali_pci_driver);
623 if (ret < 0) 623 if (ret < 0)
624 pci_dev_put(isa_bridge); 624 pci_dev_put(ali_isa_bridge);
625 return ret; 625 return ret;
626} 626}
627 627
@@ -629,7 +629,7 @@ static int __init ali_init(void)
629static void __exit ali_exit(void) 629static void __exit ali_exit(void)
630{ 630{
631 pci_unregister_driver(&ali_pci_driver); 631 pci_unregister_driver(&ali_pci_driver);
632 pci_dev_put(isa_bridge); 632 pci_dev_put(ali_isa_bridge);
633} 633}
634 634
635 635
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 0e2cde8f9973..506adde8ebb3 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -32,21 +32,6 @@ enum {
32 ATIIXP_IDE_UDMA_MODE = 0x56 32 ATIIXP_IDE_UDMA_MODE = 0x56
33}; 33};
34 34
35static int atiixp_pre_reset(struct ata_link *link, unsigned long deadline)
36{
37 struct ata_port *ap = link->ap;
38 static const struct pci_bits atiixp_enable_bits[] = {
39 { 0x48, 1, 0x01, 0x00 },
40 { 0x48, 1, 0x08, 0x00 }
41 };
42 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
43
44 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
45 return -ENOENT;
46
47 return ata_sff_prereset(link, deadline);
48}
49
50static int atiixp_cable_detect(struct ata_port *ap) 35static int atiixp_cable_detect(struct ata_port *ap)
51{ 36{
52 struct pci_dev *pdev = to_pci_dev(ap->host->dev); 37 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
@@ -229,10 +214,9 @@ static struct ata_port_operations atiixp_port_ops = {
229 .cable_detect = atiixp_cable_detect, 214 .cable_detect = atiixp_cable_detect,
230 .set_piomode = atiixp_set_piomode, 215 .set_piomode = atiixp_set_piomode,
231 .set_dmamode = atiixp_set_dmamode, 216 .set_dmamode = atiixp_set_dmamode,
232 .prereset = atiixp_pre_reset,
233}; 217};
234 218
235static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id) 219static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
236{ 220{
237 static const struct ata_port_info info = { 221 static const struct ata_port_info info = {
238 .flags = ATA_FLAG_SLAVE_POSS, 222 .flags = ATA_FLAG_SLAVE_POSS,
@@ -241,8 +225,18 @@ static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
241 .udma_mask = 0x3F, 225 .udma_mask = 0x3F,
242 .port_ops = &atiixp_port_ops 226 .port_ops = &atiixp_port_ops
243 }; 227 };
244 const struct ata_port_info *ppi[] = { &info, NULL }; 228 static const struct pci_bits atiixp_enable_bits[] = {
245 return ata_pci_sff_init_one(dev, ppi, &atiixp_sht, NULL); 229 { 0x48, 1, 0x01, 0x00 },
230 { 0x48, 1, 0x08, 0x00 }
231 };
232 const struct ata_port_info *ppi[] = { &info, &info };
233 int i;
234
235 for (i = 0; i < 2; i++)
236 if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
237 ppi[i] = &ata_dummy_port_info;
238
239 return ata_pci_sff_init_one(pdev, ppi, &atiixp_sht, NULL);
246} 240}
247 241
248static const struct pci_device_id atiixp[] = { 242static const struct pci_device_id atiixp[] = {
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
new file mode 100644
index 000000000000..0fe4ef309c62
--- /dev/null
+++ b/drivers/ata/pata_octeon_cf.c
@@ -0,0 +1,965 @@
1/*
2 * Driver for the Octeon bootbus compact flash.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2005 - 2009 Cavium Networks
9 * Copyright (C) 2008 Wind River Systems
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/libata.h>
15#include <linux/irq.h>
16#include <linux/platform_device.h>
17#include <linux/workqueue.h>
18#include <scsi/scsi_host.h>
19
20#include <asm/octeon/octeon.h>
21
22/*
23 * The Octeon bootbus compact flash interface is connected in at least
24 * 3 different configurations on various evaluation boards:
25 *
26 * -- 8 bits no irq, no DMA
27 * -- 16 bits no irq, no DMA
28 * -- 16 bits True IDE mode with DMA, but no irq.
29 *
30 * In the last case the DMA engine can generate an interrupt when the
31 * transfer is complete. For the first two cases only PIO is supported.
32 *
33 */
34
35#define DRV_NAME "pata_octeon_cf"
36#define DRV_VERSION "2.1"
37
38
39struct octeon_cf_port {
40 struct workqueue_struct *wq;
41 struct delayed_work delayed_finish;
42 struct ata_port *ap;
43 int dma_finished;
44};
45
46static struct scsi_host_template octeon_cf_sht = {
47 ATA_PIO_SHT(DRV_NAME),
48};
49
50/**
51 * Convert nanosecond based time to setting used in the
52 * boot bus timing register, based on timing multiple
53 */
54static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
55{
56 unsigned int val;
57
58 /*
59 * Compute # of eclock periods to get desired duration in
60 * nanoseconds.
61 */
62 val = DIV_ROUND_UP(nsecs * (octeon_get_clock_rate() / 1000000),
63 1000 * tim_mult);
64
65 return val;
66}
67
68static void octeon_cf_set_boot_reg_cfg(int cs)
69{
70 union cvmx_mio_boot_reg_cfgx reg_cfg;
71 reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
72 reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
73 reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
74 reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
75 reg_cfg.s.sam = 0; /* Don't combine write and output enable */
76 reg_cfg.s.we_ext = 0; /* No write enable extension */
77 reg_cfg.s.oe_ext = 0; /* No read enable extension */
78 reg_cfg.s.en = 1; /* Enable this region */
79 reg_cfg.s.orbit = 0; /* Don't combine with previous region */
80 reg_cfg.s.ale = 0; /* Don't do address multiplexing */
81 cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
82}
83
84/**
85 * Called after libata determines the needed PIO mode. This
86 * function programs the Octeon bootbus regions to support the
87 * timing requirements of the PIO mode.
88 *
89 * @ap: ATA port information
90 * @dev: ATA device
91 */
92static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
93{
94 struct octeon_cf_data *ocd = ap->dev->platform_data;
95 union cvmx_mio_boot_reg_timx reg_tim;
96 int cs = ocd->base_region;
97 int T;
98 struct ata_timing timing;
99
100 int use_iordy;
101 int trh;
102 int pause;
103 /* These names are timing parameters from the ATA spec */
104 int t1;
105 int t2;
106 int t2i;
107
108 T = (int)(2000000000000LL / octeon_get_clock_rate());
109
110 if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
111 BUG();
112
113 t1 = timing.setup;
114 if (t1)
115 t1--;
116 t2 = timing.active;
117 if (t2)
118 t2--;
119 t2i = timing.act8b;
120 if (t2i)
121 t2i--;
122
123 trh = ns_to_tim_reg(2, 20);
124 if (trh)
125 trh--;
126
127 pause = timing.cycle - timing.active - timing.setup - trh;
128 if (pause)
129 pause--;
130
131 octeon_cf_set_boot_reg_cfg(cs);
132 if (ocd->dma_engine >= 0)
133 /* True IDE mode, program both chip selects. */
134 octeon_cf_set_boot_reg_cfg(cs + 1);
135
136
137 use_iordy = ata_pio_need_iordy(dev);
138
139 reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
140 /* Disable page mode */
141 reg_tim.s.pagem = 0;
142 /* Enable dynamic timing */
143 reg_tim.s.waitm = use_iordy;
144 /* Pages are disabled */
145 reg_tim.s.pages = 0;
146 /* We don't use multiplexed address mode */
147 reg_tim.s.ale = 0;
148 /* Not used */
149 reg_tim.s.page = 0;
150 /* Time after IORDY to coninue to assert the data */
151 reg_tim.s.wait = 0;
152 /* Time to wait to complete the cycle. */
153 reg_tim.s.pause = pause;
154 /* How long to hold after a write to de-assert CE. */
155 reg_tim.s.wr_hld = trh;
156 /* How long to wait after a read to de-assert CE. */
157 reg_tim.s.rd_hld = trh;
158 /* How long write enable is asserted */
159 reg_tim.s.we = t2;
160 /* How long read enable is asserted */
161 reg_tim.s.oe = t2;
162 /* Time after CE that read/write starts */
163 reg_tim.s.ce = ns_to_tim_reg(2, 5);
164 /* Time before CE that address is valid */
165 reg_tim.s.adr = 0;
166
167 /* Program the bootbus region timing for the data port chip select. */
168 cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
169 if (ocd->dma_engine >= 0)
170 /* True IDE mode, program both chip selects. */
171 cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
172}
173
174static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
175{
176 struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
177 union cvmx_mio_boot_dma_timx dma_tim;
178 unsigned int oe_a;
179 unsigned int oe_n;
180 unsigned int dma_ackh;
181 unsigned int dma_arq;
182 unsigned int pause;
183 unsigned int T0, Tkr, Td;
184 unsigned int tim_mult;
185
186 const struct ata_timing *timing;
187
188 timing = ata_timing_find_mode(dev->dma_mode);
189 T0 = timing->cycle;
190 Td = timing->active;
191 Tkr = timing->recover;
192 dma_ackh = timing->dmack_hold;
193
194 dma_tim.u64 = 0;
195 /* dma_tim.s.tim_mult = 0 --> 4x */
196 tim_mult = 4;
197
198 /* not spec'ed, value in eclocks, not affected by tim_mult */
199 dma_arq = 8;
200 pause = 25 - dma_arq * 1000 /
201 (octeon_get_clock_rate() / 1000000); /* Tz */
202
203 oe_a = Td;
204 /* Tkr from cf spec, lengthened to meet T0 */
205 oe_n = max(T0 - oe_a, Tkr);
206
207 dma_tim.s.dmack_pi = 1;
208
209 dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
210 dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
211
212 /*
213 * This is tI, C.F. spec. says 0, but Sony CF card requires
214 * more, we use 20 nS.
215 */
216 dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);;
217 dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
218
219 dma_tim.s.dmarq = dma_arq;
220 dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
221
222 dma_tim.s.rd_dly = 0; /* Sample right on edge */
223
224 /* writes only */
225 dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
226 dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
227
228 pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
229 ns_to_tim_reg(tim_mult, 60));
230 pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
231 "%d, dmarq: %d, pause: %d\n",
232 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
233 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
234
235 cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
236 dma_tim.u64);
237
238}
239
240/**
241 * Handle an 8 bit I/O request.
242 *
243 * @dev: Device to access
244 * @buffer: Data buffer
245 * @buflen: Length of the buffer.
246 * @rw: True to write.
247 */
248static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
249 unsigned char *buffer,
250 unsigned int buflen,
251 int rw)
252{
253 struct ata_port *ap = dev->link->ap;
254 void __iomem *data_addr = ap->ioaddr.data_addr;
255 unsigned long words;
256 int count;
257
258 words = buflen;
259 if (rw) {
260 count = 16;
261 while (words--) {
262 iowrite8(*buffer, data_addr);
263 buffer++;
264 /*
265 * Every 16 writes do a read so the bootbus
266 * FIFO doesn't fill up.
267 */
268 if (--count == 0) {
269 ioread8(ap->ioaddr.altstatus_addr);
270 count = 16;
271 }
272 }
273 } else {
274 ioread8_rep(data_addr, buffer, words);
275 }
276 return buflen;
277}
278
279/**
280 * Handle a 16 bit I/O request.
281 *
282 * @dev: Device to access
283 * @buffer: Data buffer
284 * @buflen: Length of the buffer.
285 * @rw: True to write.
286 */
287static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
288 unsigned char *buffer,
289 unsigned int buflen,
290 int rw)
291{
292 struct ata_port *ap = dev->link->ap;
293 void __iomem *data_addr = ap->ioaddr.data_addr;
294 unsigned long words;
295 int count;
296
297 words = buflen / 2;
298 if (rw) {
299 count = 16;
300 while (words--) {
301 iowrite16(*(uint16_t *)buffer, data_addr);
302 buffer += sizeof(uint16_t);
303 /*
304 * Every 16 writes do a read so the bootbus
305 * FIFO doesn't fill up.
306 */
307 if (--count == 0) {
308 ioread8(ap->ioaddr.altstatus_addr);
309 count = 16;
310 }
311 }
312 } else {
313 while (words--) {
314 *(uint16_t *)buffer = ioread16(data_addr);
315 buffer += sizeof(uint16_t);
316 }
317 }
318 /* Transfer trailing 1 byte, if any. */
319 if (unlikely(buflen & 0x01)) {
320 __le16 align_buf[1] = { 0 };
321
322 if (rw == READ) {
323 align_buf[0] = cpu_to_le16(ioread16(data_addr));
324 memcpy(buffer, align_buf, 1);
325 } else {
326 memcpy(align_buf, buffer, 1);
327 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
328 }
329 words++;
330 }
331 return buflen;
332}
333
334/**
335 * Read the taskfile for 16bit non-True IDE only.
336 */
337static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
338{
339 u16 blob;
340 /* The base of the registers is at ioaddr.data_addr. */
341 void __iomem *base = ap->ioaddr.data_addr;
342
343 blob = __raw_readw(base + 0xc);
344 tf->feature = blob >> 8;
345
346 blob = __raw_readw(base + 2);
347 tf->nsect = blob & 0xff;
348 tf->lbal = blob >> 8;
349
350 blob = __raw_readw(base + 4);
351 tf->lbam = blob & 0xff;
352 tf->lbah = blob >> 8;
353
354 blob = __raw_readw(base + 6);
355 tf->device = blob & 0xff;
356 tf->command = blob >> 8;
357
358 if (tf->flags & ATA_TFLAG_LBA48) {
359 if (likely(ap->ioaddr.ctl_addr)) {
360 iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
361
362 blob = __raw_readw(base + 0xc);
363 tf->hob_feature = blob >> 8;
364
365 blob = __raw_readw(base + 2);
366 tf->hob_nsect = blob & 0xff;
367 tf->hob_lbal = blob >> 8;
368
369 blob = __raw_readw(base + 4);
370 tf->hob_lbam = blob & 0xff;
371 tf->hob_lbah = blob >> 8;
372
373 iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
374 ap->last_ctl = tf->ctl;
375 } else {
376 WARN_ON(1);
377 }
378 }
379}
380
381static u8 octeon_cf_check_status16(struct ata_port *ap)
382{
383 u16 blob;
384 void __iomem *base = ap->ioaddr.data_addr;
385
386 blob = __raw_readw(base + 6);
387 return blob >> 8;
388}
389
390static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
391 unsigned long deadline)
392{
393 struct ata_port *ap = link->ap;
394 void __iomem *base = ap->ioaddr.data_addr;
395 int rc;
396 u8 err;
397
398 DPRINTK("about to softreset\n");
399 __raw_writew(ap->ctl, base + 0xe);
400 udelay(20);
401 __raw_writew(ap->ctl | ATA_SRST, base + 0xe);
402 udelay(20);
403 __raw_writew(ap->ctl, base + 0xe);
404
405 rc = ata_sff_wait_after_reset(link, 1, deadline);
406 if (rc) {
407 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
408 return rc;
409 }
410
411 /* determine by signature whether we have ATA or ATAPI devices */
412 classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
413 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
414 return 0;
415}
416
417/**
418 * Load the taskfile for 16bit non-True IDE only. The device_addr is
419 * not loaded, we do this as part of octeon_cf_exec_command16.
420 */
421static void octeon_cf_tf_load16(struct ata_port *ap,
422 const struct ata_taskfile *tf)
423{
424 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
425 /* The base of the registers is at ioaddr.data_addr. */
426 void __iomem *base = ap->ioaddr.data_addr;
427
428 if (tf->ctl != ap->last_ctl) {
429 iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
430 ap->last_ctl = tf->ctl;
431 ata_wait_idle(ap);
432 }
433 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
434 __raw_writew(tf->hob_feature << 8, base + 0xc);
435 __raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
436 __raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
437 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
438 tf->hob_feature,
439 tf->hob_nsect,
440 tf->hob_lbal,
441 tf->hob_lbam,
442 tf->hob_lbah);
443 }
444 if (is_addr) {
445 __raw_writew(tf->feature << 8, base + 0xc);
446 __raw_writew(tf->nsect | tf->lbal << 8, base + 2);
447 __raw_writew(tf->lbam | tf->lbah << 8, base + 4);
448 VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
449 tf->feature,
450 tf->nsect,
451 tf->lbal,
452 tf->lbam,
453 tf->lbah);
454 }
455 ata_wait_idle(ap);
456}
457
458
459static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
460{
461/* There is only one device, do nothing. */
462 return;
463}
464
465/*
466 * Issue ATA command to host controller. The device_addr is also sent
467 * as it must be written in a combined write with the command.
468 */
469static void octeon_cf_exec_command16(struct ata_port *ap,
470 const struct ata_taskfile *tf)
471{
472 /* The base of the registers is at ioaddr.data_addr. */
473 void __iomem *base = ap->ioaddr.data_addr;
474 u16 blob;
475
476 if (tf->flags & ATA_TFLAG_DEVICE) {
477 VPRINTK("device 0x%X\n", tf->device);
478 blob = tf->device;
479 } else {
480 blob = 0;
481 }
482
483 DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
484 blob |= (tf->command << 8);
485 __raw_writew(blob, base + 6);
486
487
488 ata_wait_idle(ap);
489}
490
491static u8 octeon_cf_irq_on(struct ata_port *ap)
492{
493 return 0;
494}
495
496static void octeon_cf_irq_clear(struct ata_port *ap)
497{
498 return;
499}
500
501static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
502{
503 struct ata_port *ap = qc->ap;
504 struct octeon_cf_port *cf_port;
505
506 cf_port = (struct octeon_cf_port *)ap->private_data;
507 DPRINTK("ENTER\n");
508 /* issue r/w command */
509 qc->cursg = qc->sg;
510 cf_port->dma_finished = 0;
511 ap->ops->sff_exec_command(ap, &qc->tf);
512 DPRINTK("EXIT\n");
513}
514
515/**
516 * Start a DMA transfer that was already setup
517 *
518 * @qc: Information about the DMA
519 */
520static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
521{
522 struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
523 union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
524 union cvmx_mio_boot_dma_intx mio_boot_dma_int;
525 struct scatterlist *sg;
526
527 VPRINTK("%d scatterlists\n", qc->n_elem);
528
529 /* Get the scatter list entry we need to DMA into */
530 sg = qc->cursg;
531 BUG_ON(!sg);
532
533 /*
534 * Clear the DMA complete status.
535 */
536 mio_boot_dma_int.u64 = 0;
537 mio_boot_dma_int.s.done = 1;
538 cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
539 mio_boot_dma_int.u64);
540
541 /* Enable the interrupt. */
542 cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
543 mio_boot_dma_int.u64);
544
545 /* Set the direction of the DMA */
546 mio_boot_dma_cfg.u64 = 0;
547 mio_boot_dma_cfg.s.en = 1;
548 mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);
549
550 /*
551 * Don't stop the DMA if the device deasserts DMARQ. Many
552 * compact flashes deassert DMARQ for a short time between
553 * sectors. Instead of stopping and restarting the DMA, we'll
554 * let the hardware do it. If the DMA is really stopped early
555 * due to an error condition, a later timeout will force us to
556 * stop.
557 */
558 mio_boot_dma_cfg.s.clr = 0;
559
560 /* Size is specified in 16bit words and minus one notation */
561 mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;
562
563 /* We need to swap the high and low bytes of every 16 bits */
564 mio_boot_dma_cfg.s.swap8 = 1;
565
566 mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
567
568 VPRINTK("%s %d bytes address=%p\n",
569 (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
570 (void *)(unsigned long)mio_boot_dma_cfg.s.adr);
571
572 cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
573 mio_boot_dma_cfg.u64);
574}
575
576/**
577 *
578 * LOCKING:
579 * spin_lock_irqsave(host lock)
580 *
581 */
582static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
583 struct ata_queued_cmd *qc)
584{
585 struct ata_eh_info *ehi = &ap->link.eh_info;
586 struct octeon_cf_data *ocd = ap->dev->platform_data;
587 union cvmx_mio_boot_dma_cfgx dma_cfg;
588 union cvmx_mio_boot_dma_intx dma_int;
589 struct octeon_cf_port *cf_port;
590 u8 status;
591
592 VPRINTK("ata%u: protocol %d task_state %d\n",
593 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
594
595
596 if (ap->hsm_task_state != HSM_ST_LAST)
597 return 0;
598
599 cf_port = (struct octeon_cf_port *)ap->private_data;
600
601 dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
602 if (dma_cfg.s.size != 0xfffff) {
603 /* Error, the transfer was not complete. */
604 qc->err_mask |= AC_ERR_HOST_BUS;
605 ap->hsm_task_state = HSM_ST_ERR;
606 }
607
608 /* Stop and clear the dma engine. */
609 dma_cfg.u64 = 0;
610 dma_cfg.s.size = -1;
611 cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);
612
613 /* Disable the interrupt. */
614 dma_int.u64 = 0;
615 cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);
616
617 /* Clear the DMA complete status */
618 dma_int.s.done = 1;
619 cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);
620
621 status = ap->ops->sff_check_status(ap);
622
623 ata_sff_hsm_move(ap, qc, status, 0);
624
625 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
626 ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);
627
628 return 1;
629}
630
631/*
632 * Check if any queued commands have more DMAs, if so start the next
633 * transfer, else do end of transfer handling.
634 */
635static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
636{
637 struct ata_host *host = dev_instance;
638 struct octeon_cf_port *cf_port;
639 int i;
640 unsigned int handled = 0;
641 unsigned long flags;
642
643 spin_lock_irqsave(&host->lock, flags);
644
645 DPRINTK("ENTER\n");
646 for (i = 0; i < host->n_ports; i++) {
647 u8 status;
648 struct ata_port *ap;
649 struct ata_queued_cmd *qc;
650 union cvmx_mio_boot_dma_intx dma_int;
651 union cvmx_mio_boot_dma_cfgx dma_cfg;
652 struct octeon_cf_data *ocd;
653
654 ap = host->ports[i];
655 ocd = ap->dev->platform_data;
656 if (!ap || (ap->flags & ATA_FLAG_DISABLED))
657 continue;
658
659 ocd = ap->dev->platform_data;
660 cf_port = (struct octeon_cf_port *)ap->private_data;
661 dma_int.u64 =
662 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
663 dma_cfg.u64 =
664 cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
665
666 qc = ata_qc_from_tag(ap, ap->link.active_tag);
667
668 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
669 (qc->flags & ATA_QCFLAG_ACTIVE)) {
670 if (dma_int.s.done && !dma_cfg.s.en) {
671 if (!sg_is_last(qc->cursg)) {
672 qc->cursg = sg_next(qc->cursg);
673 handled = 1;
674 octeon_cf_dma_start(qc);
675 continue;
676 } else {
677 cf_port->dma_finished = 1;
678 }
679 }
680 if (!cf_port->dma_finished)
681 continue;
682 status = ioread8(ap->ioaddr.altstatus_addr);
683 if (status & (ATA_BUSY | ATA_DRQ)) {
684 /*
685 * We are busy, try to handle it
686 * later. This is the DMA finished
687 * interrupt, and it could take a
688 * little while for the card to be
689 * ready for more commands.
690 */
691 /* Clear DMA irq. */
692 dma_int.u64 = 0;
693 dma_int.s.done = 1;
694 cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
695 dma_int.u64);
696
697 queue_delayed_work(cf_port->wq,
698 &cf_port->delayed_finish, 1);
699 handled = 1;
700 } else {
701 handled |= octeon_cf_dma_finished(ap, qc);
702 }
703 }
704 }
705 spin_unlock_irqrestore(&host->lock, flags);
706 DPRINTK("EXIT\n");
707 return IRQ_RETVAL(handled);
708}
709
710static void octeon_cf_delayed_finish(struct work_struct *work)
711{
712 struct octeon_cf_port *cf_port = container_of(work,
713 struct octeon_cf_port,
714 delayed_finish.work);
715 struct ata_port *ap = cf_port->ap;
716 struct ata_host *host = ap->host;
717 struct ata_queued_cmd *qc;
718 unsigned long flags;
719 u8 status;
720
721 spin_lock_irqsave(&host->lock, flags);
722
723 /*
724 * If the port is not waiting for completion, it must have
725 * handled it previously. The hsm_task_state is
726 * protected by host->lock.
727 */
728 if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
729 goto out;
730
731 status = ioread8(ap->ioaddr.altstatus_addr);
732 if (status & (ATA_BUSY | ATA_DRQ)) {
733 /* Still busy, try again. */
734 queue_delayed_work(cf_port->wq,
735 &cf_port->delayed_finish, 1);
736 goto out;
737 }
738 qc = ata_qc_from_tag(ap, ap->link.active_tag);
739 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
740 (qc->flags & ATA_QCFLAG_ACTIVE))
741 octeon_cf_dma_finished(ap, qc);
742out:
743 spin_unlock_irqrestore(&host->lock, flags);
744}
745
746static void octeon_cf_dev_config(struct ata_device *dev)
747{
748 /*
749	 * The bootbus DMA can do at most 2^20 - 1 16-bit transfers per request,
750	 * so throttle max_sectors to 2^12 - 1 == 4095: 4095 sectors * 512 bytes
751	 * is 1,048,320 16-bit transfers, safely below that limit.
752 */
753 dev->max_sectors = min(dev->max_sectors, 4095U);
754}
755
756/*
757 * Trap if driver tries to do standard bmdma commands. They are not
758 * supported.
759 */
760static void unreachable_qc(struct ata_queued_cmd *qc)
761{
762 BUG();
763}
764
765static u8 unreachable_port(struct ata_port *ap)
766{
767 BUG();
768}
769
770/*
771 * We don't do ATAPI DMA so return 0.
772 */
773static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
774{
775 return 0;
776}
777
778static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
779{
780 struct ata_port *ap = qc->ap;
781
782 switch (qc->tf.protocol) {
783 case ATA_PROT_DMA:
784 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
785
786 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
787 octeon_cf_dma_setup(qc); /* set up dma */
788 octeon_cf_dma_start(qc); /* initiate dma */
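		/* Completion is signalled via the DMA interrupt; park the HSM in its last state until then. */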
789 ap->hsm_task_state = HSM_ST_LAST;
790 break;
791
792 case ATAPI_PROT_DMA:
793 dev_err(ap->dev, "Error, ATAPI not supported\n");
794 BUG();
795
796 default:
797 return ata_sff_qc_issue(qc);
798 }
799
800 return 0;
801}
802
803static struct ata_port_operations octeon_cf_ops = {
804 .inherits = &ata_sff_port_ops,
805 .check_atapi_dma = octeon_cf_check_atapi_dma,
806 .qc_prep = ata_noop_qc_prep,
807 .qc_issue = octeon_cf_qc_issue,
808 .sff_dev_select = octeon_cf_dev_select,
809 .sff_irq_on = octeon_cf_irq_on,
810 .sff_irq_clear = octeon_cf_irq_clear,
811 .bmdma_setup = unreachable_qc,
812 .bmdma_start = unreachable_qc,
813 .bmdma_stop = unreachable_qc,
814 .bmdma_status = unreachable_port,
815 .cable_detect = ata_cable_40wire,
816 .set_piomode = octeon_cf_set_piomode,
817 .set_dmamode = octeon_cf_set_dmamode,
818 .dev_config = octeon_cf_dev_config,
819};
820
821static int __devinit octeon_cf_probe(struct platform_device *pdev)
822{
823 struct resource *res_cs0, *res_cs1;
824
825 void __iomem *cs0;
826 void __iomem *cs1 = NULL;
827 struct ata_host *host;
828 struct ata_port *ap;
829 struct octeon_cf_data *ocd;
830 int irq = 0;
831 irq_handler_t irq_handler = NULL;
832 void __iomem *base;
833 struct octeon_cf_port *cf_port;
834
835 res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
836
837 if (!res_cs0)
838 return -EINVAL;
839
840 ocd = pdev->dev.platform_data;
841
842 cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
843 res_cs0->end - res_cs0->start + 1);
844
845 if (!cs0)
846 return -ENOMEM;
847
848	/* The availability of DMA determines whether this is True IDE mode. */
849 if (ocd->dma_engine >= 0) {
850 res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
851 if (!res_cs1)
852 return -EINVAL;
853
854 cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
855				   res_cs1->end - res_cs1->start + 1);
856
857 if (!cs1)
858 return -ENOMEM;
859 }
860
861 cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
862 if (!cf_port)
863 return -ENOMEM;
864
865 /* allocate host */
866 host = ata_host_alloc(&pdev->dev, 1);
867 if (!host)
868 goto free_cf_port;
869
870 ap = host->ports[0];
871 ap->private_data = cf_port;
872 cf_port->ap = ap;
873 ap->ops = &octeon_cf_ops;
874 ap->pio_mask = 0x7f; /* Support PIO 0-6 */
875 ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
876 | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
877
878 base = cs0 + ocd->base_region_bias;
879 if (!ocd->is16bit) {
880 ap->ioaddr.cmd_addr = base;
881 ata_sff_std_ports(&ap->ioaddr);
882
883 ap->ioaddr.altstatus_addr = base + 0xe;
884 ap->ioaddr.ctl_addr = base + 0xe;
885 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
886 } else if (cs1) {
887 /* Presence of cs1 indicates True IDE mode. */
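		/* Byte-wide taskfile registers sit on the odd byte of each 16-bit word; only the 16-bit data register uses the even offset. */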
888 ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
889 ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
890 ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
891 ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
892 ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
893 ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
894 ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
895 ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
896 ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
897 ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
898 ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
899 ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
900 ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
901 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
902
903 ap->mwdma_mask = 0x1f; /* Support MWDMA 0-4 */
904 irq = platform_get_irq(pdev, 0);
905 irq_handler = octeon_cf_interrupt;
906
907 /* True IDE mode needs delayed work to poll for not-busy. */
908 cf_port->wq = create_singlethread_workqueue(DRV_NAME);
909 if (!cf_port->wq)
910 goto free_cf_port;
911 INIT_DELAYED_WORK(&cf_port->delayed_finish,
912 octeon_cf_delayed_finish);
913
914 } else {
915 /* 16 bit but not True IDE */
916 octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
917 octeon_cf_ops.softreset = octeon_cf_softreset16;
918 octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
919 octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
920 octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
921 octeon_cf_ops.sff_exec_command = octeon_cf_exec_command16;
922
923 ap->ioaddr.data_addr = base + ATA_REG_DATA;
924 ap->ioaddr.nsect_addr = base + ATA_REG_NSECT;
925 ap->ioaddr.lbal_addr = base + ATA_REG_LBAL;
926 ap->ioaddr.ctl_addr = base + 0xe;
927 ap->ioaddr.altstatus_addr = base + 0xe;
928 }
929
930 ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
931
932
933 dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
934 (ocd->is16bit) ? 16 : 8,
935 (cs1) ? ", True IDE" : "");
936
937
938 return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
939
940free_cf_port:
941 kfree(cf_port);
942 return -ENOMEM;
943}
944
945static struct platform_driver octeon_cf_driver = {
946 .probe = octeon_cf_probe,
947 .driver = {
948 .name = DRV_NAME,
949 .owner = THIS_MODULE,
950 },
951};
952
953static int __init octeon_cf_init(void)
954{
955 return platform_driver_register(&octeon_cf_driver);
956}
957
958
959MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
960MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
961MODULE_LICENSE("GPL");
962MODULE_VERSION(DRV_VERSION);
963MODULE_ALIAS("platform:" DRV_NAME);
964
965module_init(octeon_cf_init);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 1a56db92ff7a..55bc88c1707b 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1288,7 +1288,7 @@ static const struct ata_port_info sata_fsl_port_info[] = {
1288static int sata_fsl_probe(struct of_device *ofdev, 1288static int sata_fsl_probe(struct of_device *ofdev,
1289 const struct of_device_id *match) 1289 const struct of_device_id *match)
1290{ 1290{
1291 int retval = 0; 1291 int retval = -ENXIO;
1292 void __iomem *hcr_base = NULL; 1292 void __iomem *hcr_base = NULL;
1293 void __iomem *ssr_base = NULL; 1293 void __iomem *ssr_base = NULL;
1294 void __iomem *csr_base = NULL; 1294 void __iomem *csr_base = NULL;
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index c18935f0bda2..5c62da9cd491 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -92,6 +92,8 @@ static const struct pci_device_id svia_pci_tbl[] = {
92 { PCI_VDEVICE(VIA, 0x5372), vt6420 }, 92 { PCI_VDEVICE(VIA, 0x5372), vt6420 },
93 { PCI_VDEVICE(VIA, 0x7372), vt6420 }, 93 { PCI_VDEVICE(VIA, 0x7372), vt6420 },
94 { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */ 94 { PCI_VDEVICE(VIA, 0x5287), vt8251 }, /* 2 sata chnls (Master/Slave) */
95 { PCI_VDEVICE(VIA, 0x9000), vt8251 },
96 { PCI_VDEVICE(VIA, 0x9040), vt8251 },
95 97
96 { } /* terminate list */ 98 { } /* terminate list */
97}; 99};
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8079afca4972..55e530942ab0 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -777,10 +777,16 @@ static void device_remove_class_symlinks(struct device *dev)
777int dev_set_name(struct device *dev, const char *fmt, ...) 777int dev_set_name(struct device *dev, const char *fmt, ...)
778{ 778{
779 va_list vargs; 779 va_list vargs;
780 char *s;
780 781
781 va_start(vargs, fmt); 782 va_start(vargs, fmt);
782 vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs); 783 vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs);
783 va_end(vargs); 784 va_end(vargs);
785
786 /* ewww... some of these buggers have / in the name... */
787 while ((s = strchr(dev->bus_id, '/')))
788 *s = '!';
789
784 return 0; 790 return 0;
785} 791}
786EXPORT_SYMBOL_GPL(dev_set_name); 792EXPORT_SYMBOL_GPL(dev_set_name);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 7bcc1d8bc967..34f80fa6fed1 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -406,6 +406,7 @@ static int nbd_do_it(struct nbd_device *lo)
406 ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); 406 ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
407 if (ret) { 407 if (ret) {
408 printk(KERN_ERR "nbd: sysfs_create_file failed!"); 408 printk(KERN_ERR "nbd: sysfs_create_file failed!");
409 lo->pid = 0;
409 return ret; 410 return ret;
410 } 411 }
411 412
@@ -413,6 +414,7 @@ static int nbd_do_it(struct nbd_device *lo)
413 nbd_end_request(req); 414 nbd_end_request(req);
414 415
415 sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); 416 sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr);
417 lo->pid = 0;
416 return 0; 418 return 0;
417} 419}
418 420
@@ -648,6 +650,8 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
648 set_capacity(lo->disk, lo->bytesize >> 9); 650 set_capacity(lo->disk, lo->bytesize >> 9);
649 return 0; 651 return 0;
650 case NBD_DO_IT: 652 case NBD_DO_IT:
653 if (lo->pid)
654 return -EBUSY;
651 if (!lo->file) 655 if (!lo->file)
652 return -EINVAL; 656 return -EINVAL;
653 thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); 657 thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 936466f62afd..bccc42bb9212 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -141,7 +141,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
141 141
142 start_sector = req->sector * priv->blocking_factor; 142 start_sector = req->sector * priv->blocking_factor;
143 sectors = req->nr_sectors * priv->blocking_factor; 143 sectors = req->nr_sectors * priv->blocking_factor;
144 dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n", 144 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
145 __func__, __LINE__, op, sectors, start_sector); 145 __func__, __LINE__, op, sectors, start_sector);
146 146
147 if (write) { 147 if (write) {
@@ -178,7 +178,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
178 LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 178 LV1_STORAGE_ATA_HDDOUT, 0, 0, 0,
179 0, &dev->tag); 179 0, &dev->tag);
180 if (res) { 180 if (res) {
181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n", 181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
182 __func__, __LINE__, res); 182 __func__, __LINE__, res);
183 end_request(req, 0); 183 end_request(req, 0);
184 return 0; 184 return 0;
@@ -238,11 +238,11 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
238 238
239 if (tag != dev->tag) 239 if (tag != dev->tag)
240 dev_err(&dev->sbd.core, 240 dev_err(&dev->sbd.core,
241 "%s:%u: tag mismatch, got %lx, expected %lx\n", 241 "%s:%u: tag mismatch, got %llx, expected %llx\n",
242 __func__, __LINE__, tag, dev->tag); 242 __func__, __LINE__, tag, dev->tag);
243 243
244 if (res) { 244 if (res) {
245 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n", 245 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
246 __func__, __LINE__, res, status); 246 __func__, __LINE__, res, status);
247 return IRQ_HANDLED; 247 return IRQ_HANDLED;
248 } 248 }
@@ -269,7 +269,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
269 op = read ? "read" : "write"; 269 op = read ? "read" : "write";
270 } 270 }
271 if (status) { 271 if (status) {
272 dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__, 272 dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
273 __LINE__, op, status); 273 __LINE__, op, status);
274 error = -EIO; 274 error = -EIO;
275 } else { 275 } else {
@@ -297,7 +297,7 @@ static int ps3disk_sync_cache(struct ps3_storage_device *dev)
297 297
298 res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0); 298 res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0);
299 if (res) { 299 if (res) {
300 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n", 300 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
301 __func__, __LINE__, res); 301 __func__, __LINE__, res);
302 return -EIO; 302 return -EIO;
303 } 303 }
@@ -388,7 +388,7 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
388 sizeof(ata_cmnd), ata_cmnd.buffer, 388 sizeof(ata_cmnd), ata_cmnd.buffer,
389 ata_cmnd.arglen); 389 ata_cmnd.arglen);
390 if (res) { 390 if (res) {
391 dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n", 391 dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%llx\n",
392 __func__, __LINE__, res); 392 __func__, __LINE__, res);
393 return -EIO; 393 return -EIO;
394 } 394 }
@@ -426,7 +426,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
426 426
427 if (dev->blk_size < 512) { 427 if (dev->blk_size < 512) {
428 dev_err(&dev->sbd.core, 428 dev_err(&dev->sbd.core,
429 "%s:%u: cannot handle block size %lu\n", __func__, 429 "%s:%u: cannot handle block size %llu\n", __func__,
430 __LINE__, dev->blk_size); 430 __LINE__, dev->blk_size);
431 return -EINVAL; 431 return -EINVAL;
432 } 432 }
@@ -512,7 +512,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
512 dev->regions[dev->region_idx].size*priv->blocking_factor); 512 dev->regions[dev->region_idx].size*priv->blocking_factor);
513 513
514 dev_info(&dev->sbd.core, 514 dev_info(&dev->sbd.core,
515 "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n", 515 "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
516 gendisk->disk_name, priv->model, priv->raw_capacity >> 11, 516 gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
517 get_capacity(gendisk) >> 11); 517 get_capacity(gendisk) >> 11);
518 518
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 09676b4e5d89..94e7e3c8c05a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -318,8 +318,6 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
318 } /* else count == 0 */ 318 } /* else count == 0 */
319 319
320 tty->driver_data = hp; 320 tty->driver_data = hp;
321 if (!hp->irq_requested)
322 tty->low_latency = 1; /* Makes flushes to ldisc synchronous. */
323 321
324 hp->tty = tty; 322 hp->tty = tty;
325 323
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 79b6f461be75..afbe45676d71 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -44,7 +44,7 @@ static ssize_t ps3flash_read_write_sectors(struct ps3_storage_device *dev,
44 u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors, 44 u64 res = ps3stor_read_write_sectors(dev, lpar, start_sector, sectors,
45 write); 45 write);
46 if (res) { 46 if (res) {
47 dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__, 47 dev_err(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
48 __LINE__, write ? "write" : "read", res); 48 __LINE__, write ? "write" : "read", res);
49 return -EIO; 49 return -EIO;
50 } 50 }
@@ -59,7 +59,7 @@ static ssize_t ps3flash_read_sectors(struct ps3_storage_device *dev,
59 59
60 max_sectors = dev->bounce_size / dev->blk_size; 60 max_sectors = dev->bounce_size / dev->blk_size;
61 if (sectors > max_sectors) { 61 if (sectors > max_sectors) {
62 dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %lu\n", 62 dev_dbg(&dev->sbd.core, "%s:%u Limiting sectors to %llu\n",
63 __func__, __LINE__, max_sectors); 63 __func__, __LINE__, max_sectors);
64 sectors = max_sectors; 64 sectors = max_sectors;
65 } 65 }
@@ -144,7 +144,7 @@ static ssize_t ps3flash_read(struct file *file, char __user *buf, size_t count,
144 goto fail; 144 goto fail;
145 } 145 }
146 146
147 n = min(remaining, sectors_read*dev->blk_size-offset); 147 n = min_t(u64, remaining, sectors_read*dev->blk_size-offset);
148 dev_dbg(&dev->sbd.core, 148 dev_dbg(&dev->sbd.core,
149 "%s:%u: copy %lu bytes from 0x%p to user 0x%p\n", 149 "%s:%u: copy %lu bytes from 0x%p to user 0x%p\n",
150 __func__, __LINE__, n, dev->bounce_buf+offset, buf); 150 __func__, __LINE__, n, dev->bounce_buf+offset, buf);
@@ -225,7 +225,7 @@ static ssize_t ps3flash_write(struct file *file, const char __user *buf,
225 if (end_read_sector >= start_read_sector) { 225 if (end_read_sector >= start_read_sector) {
226 /* Merge head and tail */ 226 /* Merge head and tail */
227 dev_dbg(&dev->sbd.core, 227 dev_dbg(&dev->sbd.core,
228 "Merged head and tail: %lu sectors at %lu\n", 228 "Merged head and tail: %llu sectors at %llu\n",
229 chunk_sectors, start_write_sector); 229 chunk_sectors, start_write_sector);
230 res = ps3flash_read_sectors(dev, start_write_sector, 230 res = ps3flash_read_sectors(dev, start_write_sector,
231 chunk_sectors, 0); 231 chunk_sectors, 0);
@@ -235,7 +235,7 @@ static ssize_t ps3flash_write(struct file *file, const char __user *buf,
235 if (head) { 235 if (head) {
236 /* Read head */ 236 /* Read head */
237 dev_dbg(&dev->sbd.core, 237 dev_dbg(&dev->sbd.core,
238 "head: %lu sectors at %lu\n", head, 238 "head: %llu sectors at %llu\n", head,
239 start_write_sector); 239 start_write_sector);
240 res = ps3flash_read_sectors(dev, 240 res = ps3flash_read_sectors(dev,
241 start_write_sector, 241 start_write_sector,
@@ -247,7 +247,7 @@ static ssize_t ps3flash_write(struct file *file, const char __user *buf,
247 start_write_sector+chunk_sectors) { 247 start_write_sector+chunk_sectors) {
248 /* Read tail */ 248 /* Read tail */
249 dev_dbg(&dev->sbd.core, 249 dev_dbg(&dev->sbd.core,
250 "tail: %lu sectors at %lu\n", tail, 250 "tail: %llu sectors at %llu\n", tail,
251 start_read_sector); 251 start_read_sector);
252 sec_off = start_read_sector-start_write_sector; 252 sec_off = start_read_sector-start_write_sector;
253 res = ps3flash_read_sectors(dev, 253 res = ps3flash_read_sectors(dev,
@@ -258,7 +258,7 @@ static ssize_t ps3flash_write(struct file *file, const char __user *buf,
258 } 258 }
259 } 259 }
260 260
261 n = min(remaining, dev->bounce_size-offset); 261 n = min_t(u64, remaining, dev->bounce_size-offset);
262 dev_dbg(&dev->sbd.core, 262 dev_dbg(&dev->sbd.core,
263 "%s:%u: copy %lu bytes from user 0x%p to 0x%p\n", 263 "%s:%u: copy %lu bytes from user 0x%p to 0x%p\n",
264 __func__, __LINE__, n, buf, dev->bounce_buf+offset); 264 __func__, __LINE__, n, buf, dev->bounce_buf+offset);
@@ -299,11 +299,11 @@ static irqreturn_t ps3flash_interrupt(int irq, void *data)
299 299
300 if (tag != dev->tag) 300 if (tag != dev->tag)
301 dev_err(&dev->sbd.core, 301 dev_err(&dev->sbd.core,
302 "%s:%u: tag mismatch, got %lx, expected %lx\n", 302 "%s:%u: tag mismatch, got %llx, expected %llx\n",
303 __func__, __LINE__, tag, dev->tag); 303 __func__, __LINE__, tag, dev->tag);
304 304
305 if (res) { 305 if (res) {
306 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n", 306 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
307 __func__, __LINE__, res, status); 307 __func__, __LINE__, res, status);
308 } else { 308 } else {
309 dev->lv1_status = status; 309 dev->lv1_status = status;
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 53544e21f191..f329f459817c 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: synclink_gt.c,v 4.50 2007/07/25 19:29:25 paulkf Exp $
3 *
4 * Device driver for Microgate SyncLink GT serial adapters. 2 * Device driver for Microgate SyncLink GT serial adapters.
5 * 3 *
6 * written by Paul Fulghum for Microgate Corporation 4 * written by Paul Fulghum for Microgate Corporation
@@ -91,7 +89,6 @@
91 * module identification 89 * module identification
92 */ 90 */
93static char *driver_name = "SyncLink GT"; 91static char *driver_name = "SyncLink GT";
94static char *driver_version = "$Revision: 4.50 $";
95static char *tty_driver_name = "synclink_gt"; 92static char *tty_driver_name = "synclink_gt";
96static char *tty_dev_prefix = "ttySLG"; 93static char *tty_dev_prefix = "ttySLG";
97MODULE_LICENSE("GPL"); 94MODULE_LICENSE("GPL");
@@ -1309,7 +1306,7 @@ static int read_proc(char *page, char **start, off_t off, int count,
1309 off_t begin = 0; 1306 off_t begin = 0;
1310 struct slgt_info *info; 1307 struct slgt_info *info;
1311 1308
1312 len += sprintf(page, "synclink_gt driver:%s\n", driver_version); 1309 len += sprintf(page, "synclink_gt driver\n");
1313 1310
1314 info = slgt_device_list; 1311 info = slgt_device_list;
1315 while( info ) { 1312 while( info ) {
@@ -2441,7 +2438,7 @@ static void program_hw(struct slgt_info *info)
2441 info->ri_chkcount = 0; 2438 info->ri_chkcount = 0;
2442 info->dsr_chkcount = 0; 2439 info->dsr_chkcount = 0;
2443 2440
2444 slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR); 2441 slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
2445 get_signals(info); 2442 get_signals(info);
2446 2443
2447 if (info->netcount || 2444 if (info->netcount ||
@@ -3576,7 +3573,7 @@ static void slgt_cleanup(void)
3576 struct slgt_info *info; 3573 struct slgt_info *info;
3577 struct slgt_info *tmp; 3574 struct slgt_info *tmp;
3578 3575
3579 printk("unload %s %s\n", driver_name, driver_version); 3576 printk(KERN_INFO "unload %s\n", driver_name);
3580 3577
3581 if (serial_driver) { 3578 if (serial_driver) {
3582 for (info=slgt_device_list ; info != NULL ; info=info->next_device) 3579 for (info=slgt_device_list ; info != NULL ; info=info->next_device)
@@ -3619,7 +3616,7 @@ static int __init slgt_init(void)
3619{ 3616{
3620 int rc; 3617 int rc;
3621 3618
3622 printk("%s %s\n", driver_name, driver_version); 3619 printk(KERN_INFO "%s\n", driver_name);
3623 3620
3624 serial_driver = alloc_tty_driver(MAX_DEVICES); 3621 serial_driver = alloc_tty_driver(MAX_DEVICES);
3625 if (!serial_driver) { 3622 if (!serial_driver) {
@@ -3650,9 +3647,8 @@ static int __init slgt_init(void)
3650 goto error; 3647 goto error;
3651 } 3648 }
3652 3649
3653 printk("%s %s, tty major#%d\n", 3650 printk(KERN_INFO "%s, tty major#%d\n",
3654 driver_name, driver_version, 3651 driver_name, serial_driver->major);
3655 serial_driver->major);
3656 3652
3657 slgt_device_count = 0; 3653 slgt_device_count = 0;
3658 if ((rc = pci_register_driver(&pci_driver)) < 0) { 3654 if ((rc = pci_register_driver(&pci_driver)) < 0) {
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index d41b9f6f7903..33a9351c896d 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -473,6 +473,12 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
473 unsigned long flags; 473 unsigned long flags;
474 474
475 spin_lock_irqsave(&sysrq_key_table_lock, flags); 475 spin_lock_irqsave(&sysrq_key_table_lock, flags);
476 /*
477 * Raise the apparent loglevel to maximum so that the sysrq header
478 * is shown to provide the user with positive feedback. We do not
479 * simply emit this at KERN_EMERG as that would change message
480 * routing in the consumers of /proc/kmsg.
481 */
476 orig_log_level = console_loglevel; 482 orig_log_level = console_loglevel;
477 console_loglevel = 7; 483 console_loglevel = 7;
478 printk(KERN_INFO "SysRq : "); 484 printk(KERN_INFO "SysRq : ");
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index a408c8e487ec..6f4c7d0a53bf 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -1057,7 +1057,7 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
1057 if (retval) 1057 if (retval)
1058 return retval; 1058 return retval;
1059 1059
1060 ld = tty_ldisc_ref(tty); 1060 ld = tty_ldisc_ref_wait(tty);
1061 switch (arg) { 1061 switch (arg) {
1062 case TCIFLUSH: 1062 case TCIFLUSH:
1063 if (ld && ld->ops->flush_buffer) 1063 if (ld && ld->ops->flush_buffer)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e34b06420816..48ea59e79672 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,25 @@ config MV_XOR
62 ---help--- 62 ---help---
63 Enable support for the Marvell XOR engine. 63 Enable support for the Marvell XOR engine.
64 64
65config MX3_IPU
66 bool "MX3x Image Processing Unit support"
67 depends on ARCH_MX3
68 select DMA_ENGINE
69 default y
70 help
71	  If you plan to use the Image Processing Unit (IPU) in the i.MX3x, say
72	  Y here. If unsure, say Y.
73
74config MX3_IPU_IRQS
75 int "Number of dynamically mapped interrupts for IPU"
76 depends on MX3_IPU
77 range 2 137
78 default 4
79 help
80	  Of the 137 interrupt sources on the i.MX31 IPU, only a few are used.
81	  To avoid bloating the irq_desc[] array, we allocate a sufficient
82 number of IRQ slots and map them dynamically to specific sources.
83
65config DMA_ENGINE 84config DMA_ENGINE
66 bool 85 bool
67 86
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 14f59527d4f6..2e5dc96700d2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
7obj-$(CONFIG_FSL_DMA) += fsldma.o 7obj-$(CONFIG_FSL_DMA) += fsldma.o
8obj-$(CONFIG_MV_XOR) += mv_xor.o 8obj-$(CONFIG_MV_XOR) += mv_xor.o
9obj-$(CONFIG_DW_DMAC) += dw_dmac.o 9obj-$(CONFIG_DW_DMAC) += dw_dmac.o
10obj-$(CONFIG_MX3_IPU) += ipu/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 403dbe781122..a58993011edb 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -329,9 +329,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
329 struct dma_chan *chan; 329 struct dma_chan *chan;
330 int cpu; 330 int cpu;
331 331
332 WARN_ONCE(dmaengine_ref_count == 0,
333 "client called %s without a reference", __func__);
334
335 cpu = get_cpu(); 332 cpu = get_cpu();
336 chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan; 333 chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
337 put_cpu(); 334 put_cpu();
@@ -348,9 +345,6 @@ void dma_issue_pending_all(void)
348 struct dma_device *device; 345 struct dma_device *device;
349 struct dma_chan *chan; 346 struct dma_chan *chan;
350 347
351 WARN_ONCE(dmaengine_ref_count == 0,
352 "client called %s without a reference", __func__);
353
354 rcu_read_lock(); 348 rcu_read_lock();
355 list_for_each_entry_rcu(device, &dma_device_list, global_node) { 349 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
356 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) 350 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
@@ -961,6 +955,8 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
961 if (!dep) 955 if (!dep)
962 return; 956 return;
963 957
958 /* we'll submit tx->next now, so clear the link */
959 tx->next = NULL;
964 chan = dep->chan; 960 chan = dep->chan;
965 961
966 /* keep submitting up until a channel switch is detected 962 /* keep submitting up until a channel switch is detected
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 3603f1ea5b28..732fa1ec36ab 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -217,6 +217,10 @@ static int dmatest_func(void *data)
217 chan = thread->chan; 217 chan = thread->chan;
218 218
219 while (!kthread_should_stop()) { 219 while (!kthread_should_stop()) {
220 struct dma_device *dev = chan->device;
221 struct dma_async_tx_descriptor *tx;
222 dma_addr_t dma_src, dma_dest;
223
220 total_tests++; 224 total_tests++;
221 225
222 len = dmatest_random() % test_buf_size + 1; 226 len = dmatest_random() % test_buf_size + 1;
@@ -226,10 +230,30 @@ static int dmatest_func(void *data)
226 dmatest_init_srcbuf(thread->srcbuf, src_off, len); 230 dmatest_init_srcbuf(thread->srcbuf, src_off, len);
227 dmatest_init_dstbuf(thread->dstbuf, dst_off, len); 231 dmatest_init_dstbuf(thread->dstbuf, dst_off, len);
228 232
229 cookie = dma_async_memcpy_buf_to_buf(chan, 233 dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off,
230 thread->dstbuf + dst_off, 234 len, DMA_TO_DEVICE);
231 thread->srcbuf + src_off, 235 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
232 len); 236 dma_dest = dma_map_single(dev->dev, thread->dstbuf,
237 test_buf_size, DMA_BIDIRECTIONAL);
238
239 tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off,
240 dma_src, len,
241 DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP);
242 if (!tx) {
243 dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
244 dma_unmap_single(dev->dev, dma_dest,
245 test_buf_size, DMA_BIDIRECTIONAL);
246 pr_warning("%s: #%u: prep error with src_off=0x%x "
247 "dst_off=0x%x len=0x%x\n",
248 thread_name, total_tests - 1,
249 src_off, dst_off, len);
250 msleep(100);
251 failed_tests++;
252 continue;
253 }
254 tx->callback = NULL;
255 cookie = tx->tx_submit(tx);
256
233 if (dma_submit_error(cookie)) { 257 if (dma_submit_error(cookie)) {
234 pr_warning("%s: #%u: submit error %d with src_off=0x%x " 258 pr_warning("%s: #%u: submit error %d with src_off=0x%x "
235 "dst_off=0x%x len=0x%x\n", 259 "dst_off=0x%x len=0x%x\n",
@@ -253,6 +277,9 @@ static int dmatest_func(void *data)
253 failed_tests++; 277 failed_tests++;
254 continue; 278 continue;
255 } 279 }
280 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */
281 dma_unmap_single(dev->dev, dma_dest,
282 test_buf_size, DMA_BIDIRECTIONAL);
256 283
257 error_count = 0; 284 error_count = 0;
258 285
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ca70a21afc68..70126a606239 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -822,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
822 */ 822 */
823 WARN_ON(fdev->feature != new_fsl_chan->feature); 823 WARN_ON(fdev->feature != new_fsl_chan->feature);
824 824
825 new_fsl_chan->dev = &new_fsl_chan->common.dev->device; 825 new_fsl_chan->dev = fdev->dev;
826 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, 826 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
827 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); 827 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
828 828
@@ -875,7 +875,8 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
875 } 875 }
876 876
877 dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, 877 dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
878 compatible, new_fsl_chan->irq); 878 compatible,
879 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
879 880
880 return 0; 881 return 0;
881 882
@@ -890,7 +891,8 @@ err_no_reg:
890 891
891static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) 892static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
892{ 893{
893 free_irq(fchan->irq, fchan); 894 if (fchan->irq != NO_IRQ)
895 free_irq(fchan->irq, fchan);
894 list_del(&fchan->common.device_node); 896 list_del(&fchan->common.device_node);
895 iounmap(fchan->reg_base); 897 iounmap(fchan->reg_base);
896 kfree(fchan); 898 kfree(fchan);
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
new file mode 100644
index 000000000000..6704cf48326d
--- /dev/null
+++ b/drivers/dma/ipu/Makefile
@@ -0,0 +1 @@
obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
new file mode 100644
index 000000000000..1f154d08e98f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -0,0 +1,1740 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/err.h>
15#include <linux/spinlock.h>
16#include <linux/delay.h>
17#include <linux/list.h>
18#include <linux/clk.h>
19#include <linux/vmalloc.h>
20#include <linux/string.h>
21#include <linux/interrupt.h>
22#include <linux/io.h>
23
24#include <mach/ipu.h>
25
26#include "ipu_intern.h"
27
28#define FS_VF_IN_VALID 0x00000002
29#define FS_ENC_IN_VALID 0x00000001
30
31/*
32 * There can be only one. We could allocate it dynamically, but then we'd have
33 * to add an extra parameter to some functions and use something as ugly as
34 *	struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
35 * in the ISR.
36 */
37static struct ipu ipu_data;
38
39#define to_ipu(id) container_of(id, struct ipu, idmac)
40
41static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
42{
43 return __raw_readl(ipu->reg_ic + reg);
44}
45
46#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
47
48static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
49{
50 __raw_writel(value, ipu->reg_ic + reg);
51}
52
53#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
54
55static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
56{
57 return __raw_readl(ipu->reg_ipu + reg);
58}
59
60static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
61{
62 __raw_writel(value, ipu->reg_ipu + reg);
63}
64
65/*****************************************************************************
66 * IPU / IC common functions
67 */
68static void dump_idmac_reg(struct ipu *ipu)
69{
70 dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
71 "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
72 idmac_read_icreg(ipu, IDMAC_CONF),
73 idmac_read_icreg(ipu, IC_CONF),
74 idmac_read_icreg(ipu, IDMAC_CHA_EN),
75 idmac_read_icreg(ipu, IDMAC_CHA_PRI),
76 idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
77 dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
78 "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
79 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
80 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
81 idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
82 idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
83 idmac_read_ipureg(ipu, IPU_TASKS_STAT));
84}
85
86static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
87{
88 switch (fmt) {
89 case IPU_PIX_FMT_GENERIC: /* generic data */
90 case IPU_PIX_FMT_RGB332:
91 case IPU_PIX_FMT_YUV420P:
92 case IPU_PIX_FMT_YUV422P:
93 default:
94 return 1;
95 case IPU_PIX_FMT_RGB565:
96 case IPU_PIX_FMT_YUYV:
97 case IPU_PIX_FMT_UYVY:
98 return 2;
99 case IPU_PIX_FMT_BGR24:
100 case IPU_PIX_FMT_RGB24:
101 return 3;
102 case IPU_PIX_FMT_GENERIC_32: /* generic data */
103 case IPU_PIX_FMT_BGR32:
104 case IPU_PIX_FMT_RGB32:
105 case IPU_PIX_FMT_ABGR32:
106 return 4;
107 }
108}
109
110/* Enable / disable direct write to memory by the Camera Sensor Interface */
111static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
112{
113 uint32_t ic_conf, mask;
114
115 switch (channel) {
116 case IDMAC_IC_0:
117 mask = IC_CONF_PRPENC_EN;
118 break;
119 case IDMAC_IC_7:
120 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
121 break;
122 default:
123 return;
124 }
125 ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
126 idmac_write_icreg(ipu, ic_conf, IC_CONF);
127}
128
129static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
130{
131 uint32_t ic_conf, mask;
132
133 switch (channel) {
134 case IDMAC_IC_0:
135 mask = IC_CONF_PRPENC_EN;
136 break;
137 case IDMAC_IC_7:
138 mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
139 break;
140 default:
141 return;
142 }
143 ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
144 idmac_write_icreg(ipu, ic_conf, IC_CONF);
145}
146
147static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
148{
149 uint32_t stat = TASK_STAT_IDLE;
150 uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
151
152 switch (channel) {
153 case IDMAC_IC_7:
154 stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
155 TSTAT_CSI2MEM_OFFSET;
156 break;
157 case IDMAC_IC_0:
158 case IDMAC_SDC_0:
159 case IDMAC_SDC_1:
160 default:
161 break;
162 }
163 return stat;
164}
165
166struct chan_param_mem_planar {
167 /* Word 0 */
168 u32 xv:10;
169 u32 yv:10;
170 u32 xb:12;
171
172 u32 yb:12;
173 u32 res1:2;
174 u32 nsb:1;
175 u32 lnpb:6;
176 u32 ubo_l:11;
177
178 u32 ubo_h:15;
179 u32 vbo_l:17;
180
181 u32 vbo_h:9;
182 u32 res2:3;
183 u32 fw:12;
184 u32 fh_l:8;
185
186 u32 fh_h:4;
187 u32 res3:28;
188
189 /* Word 1 */
190 u32 eba0;
191
192 u32 eba1;
193
194 u32 bpp:3;
195 u32 sl:14;
196 u32 pfs:3;
197 u32 bam:3;
198 u32 res4:2;
199 u32 npb:6;
200 u32 res5:1;
201
202 u32 sat:2;
203 u32 res6:30;
204} __attribute__ ((packed));
205
206struct chan_param_mem_interleaved {
207 /* Word 0 */
208 u32 xv:10;
209 u32 yv:10;
210 u32 xb:12;
211
212 u32 yb:12;
213 u32 sce:1;
214 u32 res1:1;
215 u32 nsb:1;
216 u32 lnpb:6;
217 u32 sx:10;
218 u32 sy_l:1;
219
220 u32 sy_h:9;
221 u32 ns:10;
222 u32 sm:10;
223 u32 sdx_l:3;
224
225 u32 sdx_h:2;
226 u32 sdy:5;
227 u32 sdrx:1;
228 u32 sdry:1;
229 u32 sdr1:1;
230 u32 res2:2;
231 u32 fw:12;
232 u32 fh_l:8;
233
234 u32 fh_h:4;
235 u32 res3:28;
236
237 /* Word 1 */
238 u32 eba0;
239
240 u32 eba1;
241
242 u32 bpp:3;
243 u32 sl:14;
244 u32 pfs:3;
245 u32 bam:3;
246 u32 res4:2;
247 u32 npb:6;
248 u32 res5:1;
249
250 u32 sat:2;
251 u32 scc:1;
252 u32 ofs0:5;
253 u32 ofs1:5;
254 u32 ofs2:5;
255 u32 ofs3:5;
256 u32 wid0:3;
257 u32 wid1:3;
258 u32 wid2:3;
259
260 u32 wid3:3;
261 u32 dec_sel:1;
262 u32 res6:28;
263} __attribute__ ((packed));
264
265union chan_param_mem {
266 struct chan_param_mem_planar pp;
267 struct chan_param_mem_interleaved ip;
268};
269
270static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
271 u32 u_offset, u32 v_offset)
272{
273 params->pp.ubo_l = u_offset & 0x7ff;
274 params->pp.ubo_h = u_offset >> 11;
275 params->pp.vbo_l = v_offset & 0x1ffff;
276 params->pp.vbo_h = v_offset >> 17;
277}
278
279static void ipu_ch_param_set_size(union chan_param_mem *params,
280 uint32_t pixel_fmt, uint16_t width,
281 uint16_t height, uint16_t stride)
282{
283 u32 u_offset;
284 u32 v_offset;
285
286 params->pp.fw = width - 1;
287 params->pp.fh_l = height - 1;
288 params->pp.fh_h = (height - 1) >> 8;
289 params->pp.sl = stride - 1;
290
291 switch (pixel_fmt) {
292 case IPU_PIX_FMT_GENERIC:
293		/* Represents 8-bit Generic data */
294 params->pp.bpp = 3;
295 params->pp.pfs = 7;
296 params->pp.npb = 31;
297 params->pp.sat = 2; /* SAT = use 32-bit access */
298 break;
299 case IPU_PIX_FMT_GENERIC_32:
300		/* Represents 32-bit Generic data */
301 params->pp.bpp = 0;
302 params->pp.pfs = 7;
303 params->pp.npb = 7;
304 params->pp.sat = 2; /* SAT = use 32-bit access */
305 break;
306 case IPU_PIX_FMT_RGB565:
307 params->ip.bpp = 2;
308 params->ip.pfs = 4;
309 params->ip.npb = 7;
310 params->ip.sat = 2; /* SAT = 32-bit access */
311 params->ip.ofs0 = 0; /* Red bit offset */
312 params->ip.ofs1 = 5; /* Green bit offset */
313 params->ip.ofs2 = 11; /* Blue bit offset */
314 params->ip.ofs3 = 16; /* Alpha bit offset */
315 params->ip.wid0 = 4; /* Red bit width - 1 */
316 params->ip.wid1 = 5; /* Green bit width - 1 */
317 params->ip.wid2 = 4; /* Blue bit width - 1 */
318 break;
319 case IPU_PIX_FMT_BGR24:
320 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
321 params->ip.pfs = 4;
322 params->ip.npb = 7;
323 params->ip.sat = 2; /* SAT = 32-bit access */
324 params->ip.ofs0 = 0; /* Red bit offset */
325 params->ip.ofs1 = 8; /* Green bit offset */
326 params->ip.ofs2 = 16; /* Blue bit offset */
327 params->ip.ofs3 = 24; /* Alpha bit offset */
328 params->ip.wid0 = 7; /* Red bit width - 1 */
329 params->ip.wid1 = 7; /* Green bit width - 1 */
330 params->ip.wid2 = 7; /* Blue bit width - 1 */
331 break;
332 case IPU_PIX_FMT_RGB24:
333 params->ip.bpp = 1; /* 24 BPP & RGB PFS */
334 params->ip.pfs = 4;
335 params->ip.npb = 7;
336 params->ip.sat = 2; /* SAT = 32-bit access */
337 params->ip.ofs0 = 16; /* Red bit offset */
338 params->ip.ofs1 = 8; /* Green bit offset */
339 params->ip.ofs2 = 0; /* Blue bit offset */
340 params->ip.ofs3 = 24; /* Alpha bit offset */
341 params->ip.wid0 = 7; /* Red bit width - 1 */
342 params->ip.wid1 = 7; /* Green bit width - 1 */
343 params->ip.wid2 = 7; /* Blue bit width - 1 */
344 break;
345 case IPU_PIX_FMT_BGRA32:
346 case IPU_PIX_FMT_BGR32:
347 params->ip.bpp = 0;
348 params->ip.pfs = 4;
349 params->ip.npb = 7;
350 params->ip.sat = 2; /* SAT = 32-bit access */
351 params->ip.ofs0 = 8; /* Red bit offset */
352 params->ip.ofs1 = 16; /* Green bit offset */
353 params->ip.ofs2 = 24; /* Blue bit offset */
354 params->ip.ofs3 = 0; /* Alpha bit offset */
355 params->ip.wid0 = 7; /* Red bit width - 1 */
356 params->ip.wid1 = 7; /* Green bit width - 1 */
357 params->ip.wid2 = 7; /* Blue bit width - 1 */
358 params->ip.wid3 = 7; /* Alpha bit width - 1 */
359 break;
360 case IPU_PIX_FMT_RGBA32:
361 case IPU_PIX_FMT_RGB32:
362 params->ip.bpp = 0;
363 params->ip.pfs = 4;
364 params->ip.npb = 7;
365 params->ip.sat = 2; /* SAT = 32-bit access */
366 params->ip.ofs0 = 24; /* Red bit offset */
367 params->ip.ofs1 = 16; /* Green bit offset */
368 params->ip.ofs2 = 8; /* Blue bit offset */
369 params->ip.ofs3 = 0; /* Alpha bit offset */
370 params->ip.wid0 = 7; /* Red bit width - 1 */
371 params->ip.wid1 = 7; /* Green bit width - 1 */
372 params->ip.wid2 = 7; /* Blue bit width - 1 */
373 params->ip.wid3 = 7; /* Alpha bit width - 1 */
374 break;
375 case IPU_PIX_FMT_ABGR32:
376 params->ip.bpp = 0;
377 params->ip.pfs = 4;
378 params->ip.npb = 7;
379 params->ip.sat = 2; /* SAT = 32-bit access */
380 params->ip.ofs0 = 8; /* Red bit offset */
381 params->ip.ofs1 = 16; /* Green bit offset */
382 params->ip.ofs2 = 24; /* Blue bit offset */
383 params->ip.ofs3 = 0; /* Alpha bit offset */
384 params->ip.wid0 = 7; /* Red bit width - 1 */
385 params->ip.wid1 = 7; /* Green bit width - 1 */
386 params->ip.wid2 = 7; /* Blue bit width - 1 */
387 params->ip.wid3 = 7; /* Alpha bit width - 1 */
388 break;
389 case IPU_PIX_FMT_UYVY:
390 params->ip.bpp = 2;
391 params->ip.pfs = 6;
392 params->ip.npb = 7;
393 params->ip.sat = 2; /* SAT = 32-bit access */
394 break;
395 case IPU_PIX_FMT_YUV420P2:
396 case IPU_PIX_FMT_YUV420P:
397 params->ip.bpp = 3;
398 params->ip.pfs = 3;
399 params->ip.npb = 7;
400 params->ip.sat = 2; /* SAT = 32-bit access */
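		/* 4:2:0 planar: the U plane follows the full-size Y plane, and the V plane follows the quarter-size U plane. */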
401 u_offset = stride * height;
402 v_offset = u_offset + u_offset / 4;
403 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
404 break;
405 case IPU_PIX_FMT_YVU422P:
406 params->ip.bpp = 3;
407 params->ip.pfs = 2;
408 params->ip.npb = 7;
409 params->ip.sat = 2; /* SAT = 32-bit access */
410 v_offset = stride * height;
411 u_offset = v_offset + v_offset / 2;
412 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
413 break;
414 case IPU_PIX_FMT_YUV422P:
415 params->ip.bpp = 3;
416 params->ip.pfs = 2;
417 params->ip.npb = 7;
418 params->ip.sat = 2; /* SAT = 32-bit access */
419 u_offset = stride * height;
420 v_offset = u_offset + u_offset / 2;
421 ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
422 break;
423 default:
424 dev_err(ipu_data.dev,
425 "mxc ipu: unimplemented pixel format %d\n", pixel_fmt);
426 break;
427 }
428
429 params->pp.nsb = 1;
430}
431
432static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
433 uint16_t burst_pixels)
434{
435 params->pp.npb = burst_pixels - 1;
436}
437
438static void ipu_ch_param_set_buffer(union chan_param_mem *params,
439 dma_addr_t buf0, dma_addr_t buf1)
440{
441 params->pp.eba0 = buf0;
442 params->pp.eba1 = buf1;
443}
444
445static void ipu_ch_param_set_rotation(union chan_param_mem *params,
446 enum ipu_rotate_mode rotate)
447{
448 params->pp.bam = rotate;
449}
450
451static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
452 uint32_t num_words)
453{
454 for (; num_words > 0; num_words--) {
455 dev_dbg(ipu_data.dev,
456 "write param mem - addr = 0x%08X, data = 0x%08X\n",
457 addr, *data);
458 idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
459 idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
460 addr++;
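		/* Only words 0-4 of each 8-word parameter row are used; after word 4, skip to word 0 of the next row. */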
461 if ((addr & 0x7) == 5) {
462 addr &= ~0x7; /* set to word 0 */
463 addr += 8; /* increment to next row */
464 }
465 }
466}
467
468static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
469 uint32_t *resize_coeff,
470 uint32_t *downsize_coeff)
471{
472 uint32_t temp_size;
473 uint32_t temp_downsize;
474
475 *resize_coeff = 1 << 13;
476 *downsize_coeff = 1 << 13;
477
478 /* Cannot downsize more than 8:1 */
479 if (out_size << 3 < in_size)
480 return -EINVAL;
481
482 /* compute downsizing coefficient */
483 temp_downsize = 0;
484 temp_size = in_size;
485 while (temp_size >= out_size * 2 && temp_downsize < 2) {
486 temp_size >>= 1;
487 temp_downsize++;
488 }
489 *downsize_coeff = temp_downsize;
490
491 /*
492 * compute resizing coefficient using the following formula:
493	 *	resize_coeff = M * (SI - 1) / (SO - 1)
494	 *	where M = 2^13, SI = input size, SO = output size
495 */
496 *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
497 if (*resize_coeff >= 16384L) {
498 dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
499 *resize_coeff = 0x3FFF;
500 }
501
502 dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
503 "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
504 *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
505 ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
506
507 return 0;
508}
509
510static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
511{
512 switch (fmt) {
513 case IPU_PIX_FMT_RGB565:
514 case IPU_PIX_FMT_BGR24:
515 case IPU_PIX_FMT_RGB24:
516 case IPU_PIX_FMT_BGR32:
517 case IPU_PIX_FMT_RGB32:
518 return IPU_COLORSPACE_RGB;
519 default:
520 return IPU_COLORSPACE_YCBCR;
521 }
522}
523
524static int ipu_ic_init_prpenc(struct ipu *ipu,
525 union ipu_channel_param *params, bool src_is_csi)
526{
527 uint32_t reg, ic_conf;
528 uint32_t downsize_coeff, resize_coeff;
529 enum ipu_color_space in_fmt, out_fmt;
530
531 /* Setup vertical resizing */
532 calc_resize_coeffs(params->video.in_height,
533 params->video.out_height,
534 &resize_coeff, &downsize_coeff);
535 reg = (downsize_coeff << 30) | (resize_coeff << 16);
536
537 /* Setup horizontal resizing */
538 calc_resize_coeffs(params->video.in_width,
539 params->video.out_width,
540 &resize_coeff, &downsize_coeff);
541 reg |= (downsize_coeff << 14) | resize_coeff;
542
543 /* Setup color space conversion */
544 in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
545 out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
546
547 /*
548 * Colourspace conversion unsupported yet - see _init_csc() in
549 * Freescale sources
550 */
551 if (in_fmt != out_fmt) {
552 dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
553 return -EOPNOTSUPP;
554 }
555
556 idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
557
558 ic_conf = idmac_read_icreg(ipu, IC_CONF);
559
560 if (src_is_csi)
561 ic_conf &= ~IC_CONF_RWS_EN;
562 else
563 ic_conf |= IC_CONF_RWS_EN;
564
565 idmac_write_icreg(ipu, ic_conf, IC_CONF);
566
567 return 0;
568}
569
570static uint32_t dma_param_addr(uint32_t dma_ch)
571{
572 /* Channel Parameter Memory */
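	/* Channel parameters are laid out 16 words apart, starting at word address 0x10000. */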
573 return 0x10000 | (dma_ch << 4);
574}
575
576static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
577 bool prio)
578{
579 u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
580
581 if (prio)
582 reg |= 1UL << channel;
583 else
584 reg &= ~(1UL << channel);
585
586 idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
587
588 dump_idmac_reg(ipu);
589}
590
591static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
592{
593 uint32_t mask;
594
595 switch (channel) {
596 case IDMAC_IC_0:
597 case IDMAC_IC_7:
598 mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
599 break;
600 case IDMAC_SDC_0:
601 case IDMAC_SDC_1:
602 mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
603 break;
604 default:
605 mask = 0;
606 break;
607 }
608
609 return mask;
610}
611
612/**
613 * ipu_enable_channel() - enable an IPU channel.
614 * @channel: channel ID.
615 * @return: 0 on success or negative error code on failure.
616 */
617static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
618{
619 struct ipu *ipu = to_ipu(idmac);
620 enum ipu_channel channel = ichan->dma_chan.chan_id;
621 uint32_t reg;
622 unsigned long flags;
623
624 spin_lock_irqsave(&ipu->lock, flags);
625
626 /* Reset to buffer 0 */
627 idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
628 ichan->active_buffer = 0;
629 ichan->status = IPU_CHANNEL_ENABLED;
630
631 switch (channel) {
632 case IDMAC_SDC_0:
633 case IDMAC_SDC_1:
634 case IDMAC_IC_7:
635 ipu_channel_set_priority(ipu, channel, true);
636 default:
637 break;
638 }
639
640 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
641
642 idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
643
644 ipu_ic_enable_task(ipu, channel);
645
646 spin_unlock_irqrestore(&ipu->lock, flags);
647 return 0;
648}
649
650/**
651 * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
652 * @channel: channel ID.
653 * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
654 * @width: width of buffer in pixels.
655 * @height: height of buffer in pixels.
656 * @stride: stride length of buffer in pixels.
657 * @rot_mode: rotation mode of buffer. A rotation setting other than
658 * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
659 * rotation channels.
660 * @phyaddr_0: buffer 0 physical address.
661 * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
662 *		0 enables double-buffering mode.
663 * @return: 0 on success or negative error code on failure.
664 */
665static int ipu_init_channel_buffer(struct idmac_channel *ichan,
666 enum pixel_fmt pixel_fmt,
667 uint16_t width, uint16_t height,
668 uint32_t stride,
669 enum ipu_rotate_mode rot_mode,
670 dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
671{
672 enum ipu_channel channel = ichan->dma_chan.chan_id;
673 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
674 struct ipu *ipu = to_ipu(idmac);
675 union chan_param_mem params = {};
676 unsigned long flags;
677 uint32_t reg;
678 uint32_t stride_bytes;
679
680 stride_bytes = stride * bytes_per_pixel(pixel_fmt);
681
682 if (stride_bytes % 4) {
683 dev_err(ipu->dev,
684 "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
685 stride, stride_bytes);
686 return -EINVAL;
687 }
688
689 /* IC channel's stride must be a multiple of 8 pixels */
690 if ((channel <= 13) && (stride % 8)) {
691 dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
692 return -EINVAL;
693 }
694
695 /* Build parameter memory data for DMA channel */
696 ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
697 ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
698 ipu_ch_param_set_rotation(&params, rot_mode);
699 /* Some channels (rotation) have restriction on burst length */
700 switch (channel) {
701 case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
702 invalid - Table 44-30 */
703/*
704 ipu_ch_param_set_burst_size(&params, 8);
705 */
706 break;
707 case IDMAC_SDC_0:
708 case IDMAC_SDC_1:
709 /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
710 ipu_ch_param_set_burst_size(&params, 16);
711 break;
712 case IDMAC_IC_0:
713 default:
714 break;
715 }
716
717 spin_lock_irqsave(&ipu->lock, flags);
718
719 ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
720
721 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
722
723 if (phyaddr_1)
724 reg |= 1UL << channel;
725 else
726 reg &= ~(1UL << channel);
727
728 idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
729
730 ichan->status = IPU_CHANNEL_READY;
731
732	spin_unlock_irqrestore(&ipu->lock, flags);
733
734 return 0;
735}
736
737/**
738 * ipu_select_buffer() - mark a channel's buffer as ready.
739 * @channel: channel ID.
740 * @buffer_n: buffer number to mark ready.
741 */
742static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
743{
744 /* No locking - this is a write-one-to-set register, cleared by IPU */
745 if (buffer_n == 0)
746 /* Mark buffer 0 as ready. */
747 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
748 else
749 /* Mark buffer 1 as ready. */
750 idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
751}
752
753/**
754 * ipu_update_channel_buffer() - update physical address of a channel buffer.
755 * @channel: channel ID.
756 * @buffer_n: buffer number to update.
757 * 0 or 1 are the only valid values.
758 * @phyaddr: buffer physical address.
759 * @return: Returns 0 on success or negative error code on failure. This
760 * function will fail if the buffer is set to ready.
761 */
762/* Called under spin_lock(_irqsave)(&ichan->lock) */
763static int ipu_update_channel_buffer(enum ipu_channel channel,
764 int buffer_n, dma_addr_t phyaddr)
765{
766 uint32_t reg;
767 unsigned long flags;
768
769 spin_lock_irqsave(&ipu_data.lock, flags);
770
771 if (buffer_n == 0) {
772 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
773 if (reg & (1UL << channel)) {
774 spin_unlock_irqrestore(&ipu_data.lock, flags);
775 return -EACCES;
776 }
777
778 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
779 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
780 0x0008UL, IPU_IMA_ADDR);
781 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
782 } else {
783 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
784 if (reg & (1UL << channel)) {
785 spin_unlock_irqrestore(&ipu_data.lock, flags);
786 return -EACCES;
787 }
788
789 /* Check if double-buffering is already enabled */
790 reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
791
792 if (!(reg & (1UL << channel)))
793 idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
794 IPU_CHA_DB_MODE_SEL);
795
796 /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
797 idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
798 0x0009UL, IPU_IMA_ADDR);
799 idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
800 }
801
802 spin_unlock_irqrestore(&ipu_data.lock, flags);
803
804 return 0;
805}
806
807/* Called under spin_lock_irqsave(&ichan->lock) */
808static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
809 struct idmac_tx_desc *desc)
810{
811 struct scatterlist *sg;
812 int i, ret = 0;
813
814 for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
815 if (!ichan->sg[i]) {
816 ichan->sg[i] = sg;
817
818 /*
819 * On first invocation this shouldn't be necessary, the
820 * call to ipu_init_channel_buffer() above will set
821 * addresses for us, so we could make it conditional
822 * on status >= IPU_CHANNEL_ENABLED, but doing it again
823 * shouldn't hurt either.
824 */
825 ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i,
826 sg_dma_address(sg));
827 if (ret < 0)
828 return ret;
829
830 ipu_select_buffer(ichan->dma_chan.chan_id, i);
831
832 sg = sg_next(sg);
833 }
834 }
835
836 return ret;
837}
838
839static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
840{
841 struct idmac_tx_desc *desc = to_tx_desc(tx);
842 struct idmac_channel *ichan = to_idmac_chan(tx->chan);
843 struct idmac *idmac = to_idmac(tx->chan->device);
844 struct ipu *ipu = to_ipu(idmac);
845 dma_cookie_t cookie;
846 unsigned long flags;
847
848 /* Sanity check */
849 if (!list_empty(&desc->list)) {
850 /* The descriptor doesn't belong to client */
851 dev_err(&ichan->dma_chan.dev->device,
852 "Descriptor %p not prepared!\n", tx);
853 return -EBUSY;
854 }
855
856 mutex_lock(&ichan->chan_mutex);
857
858 if (ichan->status < IPU_CHANNEL_READY) {
859 struct idmac_video_param *video = &ichan->params.video;
860 /*
861 * Initial buffer assignment - the first two sg-entries from
862 * the descriptor will end up in the IDMAC buffers
863 */
864 dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
865 sg_dma_address(&desc->sg[1]);
866
867 WARN_ON(ichan->sg[0] || ichan->sg[1]);
868
869 cookie = ipu_init_channel_buffer(ichan,
870 video->out_pixel_fmt,
871 video->out_width,
872 video->out_height,
873 video->out_stride,
874 IPU_ROTATE_NONE,
875 sg_dma_address(&desc->sg[0]),
876 dma_1);
877 if (cookie < 0)
878 goto out;
879 }
880
881 /* ipu->lock can be taken under ichan->lock, but not vice versa */
882 spin_lock_irqsave(&ichan->lock, flags);
883
884 /* submit_buffers() atomically verifies and fills empty sg slots */
885 cookie = ipu_submit_channel_buffers(ichan, desc);
886
887 spin_unlock_irqrestore(&ichan->lock, flags);
888
889 if (cookie < 0)
890 goto out;
891
892 cookie = ichan->dma_chan.cookie;
893
894 if (++cookie < 0)
895 cookie = 1;
896
897 /* from dmaengine.h: "last cookie value returned to client" */
898 ichan->dma_chan.cookie = cookie;
899 tx->cookie = cookie;
900 spin_lock_irqsave(&ichan->lock, flags);
901 list_add_tail(&desc->list, &ichan->queue);
902 spin_unlock_irqrestore(&ichan->lock, flags);
903
904 if (ichan->status < IPU_CHANNEL_ENABLED) {
905 int ret = ipu_enable_channel(idmac, ichan);
906 if (ret < 0) {
907 cookie = ret;
908 spin_lock_irqsave(&ichan->lock, flags);
909 list_del_init(&desc->list);
910 spin_unlock_irqrestore(&ichan->lock, flags);
911 tx->cookie = cookie;
912 ichan->dma_chan.cookie = cookie;
913 }
914 }
915
916 dump_idmac_reg(ipu);
917
918out:
919 mutex_unlock(&ichan->chan_mutex);
920
921 return cookie;
922}
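
The cookie handling above follows the usual dmaengine convention of this era: the channel records the last cookie handed out, each submission increments it, and a signed overflow wraps back to 1, since zero and negative values are reserved for errors. A compressed sketch of just that step (illustrative; it assumes the caller serializes access to the channel):

	/* Illustrative only: hand out the next cookie on a channel */
	static dma_cookie_t idmac_assign_cookie(struct dma_chan *chan,
						struct dma_async_tx_descriptor *tx)
	{
		dma_cookie_t cookie = chan->cookie;

		if (++cookie < 0)	/* wrapped around the positive range */
			cookie = 1;

		chan->cookie = cookie;	/* "last cookie value returned to client" */
		tx->cookie = cookie;
		return cookie;
	}
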
923
924/* Called with ichan->chan_mutex held */
925static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
926{
927 struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc));
928 struct idmac *idmac = to_idmac(ichan->dma_chan.device);
929
930 if (!desc)
931 return -ENOMEM;
932
933 /* No interrupts, just disable the tasklet for a moment */
934 tasklet_disable(&to_ipu(idmac)->tasklet);
935
936 ichan->n_tx_desc = n;
937 ichan->desc = desc;
938 INIT_LIST_HEAD(&ichan->queue);
939 INIT_LIST_HEAD(&ichan->free_list);
940
941 while (n--) {
942 struct dma_async_tx_descriptor *txd = &desc->txd;
943
944 memset(txd, 0, sizeof(*txd));
945 dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
946 txd->tx_submit = idmac_tx_submit;
947 txd->chan = &ichan->dma_chan;
948 INIT_LIST_HEAD(&txd->tx_list);
949
950 list_add(&desc->list, &ichan->free_list);
951
952 desc++;
953 }
954
955 tasklet_enable(&to_ipu(idmac)->tasklet);
956
957 return 0;
958}
959
960/**
961 * ipu_init_channel() - initialize an IPU channel.
962 * @idmac: IPU DMAC context.
963 * @ichan: pointer to the channel object.
964 * @return 0 on success or negative error code on failure.
965 */
966static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
967{
968 union ipu_channel_param *params = &ichan->params;
969 uint32_t ipu_conf;
970 enum ipu_channel channel = ichan->dma_chan.chan_id;
971 unsigned long flags;
972 uint32_t reg;
973 struct ipu *ipu = to_ipu(idmac);
974 int ret = 0, n_desc = 0;
975
976 dev_dbg(ipu->dev, "init channel = %d\n", channel);
977
978 if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
979 channel != IDMAC_IC_7)
980 return -EINVAL;
981
982 spin_lock_irqsave(&ipu->lock, flags);
983
984 switch (channel) {
985 case IDMAC_IC_7:
986 n_desc = 16;
987 reg = idmac_read_icreg(ipu, IC_CONF);
988 idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
989 break;
990 case IDMAC_IC_0:
991 n_desc = 16;
992 reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
993 idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
994 ret = ipu_ic_init_prpenc(ipu, params, true);
995 break;
996 case IDMAC_SDC_0:
997 case IDMAC_SDC_1:
998 n_desc = 4;
999 default:
1000 break;
1001 }
1002
1003 ipu->channel_init_mask |= 1L << channel;
1004
1005 /* Enable IPU sub module */
1006 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
1007 ipu_channel_conf_mask(channel);
1008 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1009
1010 spin_unlock_irqrestore(&ipu->lock, flags);
1011
1012 if (n_desc && !ichan->desc)
1013 ret = idmac_desc_alloc(ichan, n_desc);
1014
1015 dump_idmac_reg(ipu);
1016
1017 return ret;
1018}
1019
1020/**
1021 * ipu_uninit_channel() - uninitialize an IPU channel.
1022 * @idmac: IPU DMAC context.
1023 * @ichan: pointer to the channel object.
1024 */
1025static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
1026{
1027 enum ipu_channel channel = ichan->dma_chan.chan_id;
1028 unsigned long flags;
1029 uint32_t reg;
1030 unsigned long chan_mask = 1UL << channel;
1031 uint32_t ipu_conf;
1032 struct ipu *ipu = to_ipu(idmac);
1033
1034 spin_lock_irqsave(&ipu->lock, flags);
1035
1036 if (!(ipu->channel_init_mask & chan_mask)) {
1037 dev_err(ipu->dev, "Channel already uninitialized %d\n",
1038 channel);
1039 spin_unlock_irqrestore(&ipu->lock, flags);
1040 return;
1041 }
1042
1043 /* Reset the double buffer */
1044 reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
1045 idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
1046
1047 ichan->sec_chan_en = false;
1048
1049 switch (channel) {
1050 case IDMAC_IC_7:
1051 reg = idmac_read_icreg(ipu, IC_CONF);
1052 idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
1053 IC_CONF);
1054 break;
1055 case IDMAC_IC_0:
1056 reg = idmac_read_icreg(ipu, IC_CONF);
1057 idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
1058 IC_CONF);
1059 break;
1060 case IDMAC_SDC_0:
1061 case IDMAC_SDC_1:
1062 default:
1063 break;
1064 }
1065
1066 ipu->channel_init_mask &= ~(1L << channel);
1067
1068 ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
1069 ~ipu_channel_conf_mask(channel);
1070 idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
1071
1072 spin_unlock_irqrestore(&ipu->lock, flags);
1073
1074 ichan->n_tx_desc = 0;
1075 vfree(ichan->desc);
1076 ichan->desc = NULL;
1077}
1078
1079/**
1080 * ipu_disable_channel() - disable an IPU channel.
1081 * @idmac: IPU DMAC context.
1082 * @ichan: channel object pointer.
1083 * @wait_for_stop: whether to wait for the channel end of frame or to
1084 * return immediately.
1085 * @return: 0 on success or negative error code on failure.
1086 */
1087static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1088 bool wait_for_stop)
1089{
1090 enum ipu_channel channel = ichan->dma_chan.chan_id;
1091 struct ipu *ipu = to_ipu(idmac);
1092 uint32_t reg;
1093 unsigned long flags;
1094 unsigned long chan_mask = 1UL << channel;
1095 unsigned int timeout;
1096
1097 if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
1098 timeout = 40;
1099 /* This waiting always fails. Related to spurious irq problem */
1100 while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
1101 (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
1102 timeout--;
1103 msleep(10);
1104
1105 if (!timeout) {
1106 dev_dbg(ipu->dev,
1107 "Warning: timeout waiting for channel %u to "
1108 "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
1109 "busy = 0x%08X, tstat = 0x%08X\n", channel,
1110 idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
1111 idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
1112 idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
1113 idmac_read_ipureg(ipu, IPU_TASKS_STAT));
1114 break;
1115 }
1116 }
1117 dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
1118 }
1119 /* SDC BG and FG must be disabled before DMA is disabled */
1120 if (wait_for_stop && (channel == IDMAC_SDC_0 ||
1121 channel == IDMAC_SDC_1)) {
1122 for (timeout = 5;
1123 timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
1124 msleep(5);
1125 }
1126
1127 spin_lock_irqsave(&ipu->lock, flags);
1128
1129 /* Disable IC task */
1130 ipu_ic_disable_task(ipu, channel);
1131
1132 /* Disable DMA channel(s) */
1133 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1134 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1135
1136 /*
1137 * Problem (observed with channel DMAIC_7): after enabling the channel
1138 * and initialising buffers, there comes an interrupt with current still
1139 * pointing at buffer 0, whereas it should use buffer 0 first and only
1140 * generate an interrupt when it is done, then current should already
1141 * point to buffer 1. This spurious interrupt also comes on channel
1142 * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the
1143 * first interrupt, there comes the second with current correctly
1144 * pointing to buffer 1 this time. But sometimes this second interrupt
1145 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
1146 * the channel seems to prevent the channel from hanging, but it doesn't
1147 * prevent the spurious interrupt. This might also be unsafe. Think
1148 * about the IDMAC controller trying to switch to a buffer when we
1149 * clear the ready bit and re-enable it a moment later.
1150 */
1151 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
1152 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
1153 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
1154
1155 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
1156 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
1157 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
1158
1159 spin_unlock_irqrestore(&ipu->lock, flags);
1160
1161 return 0;
1162}
1163
1164/*
1165 * We have several possibilities here:
1166 * current BUF next BUF
1167 *
1168 * not last sg next not last sg
1169 * not last sg next last sg
1170 * last sg first sg from next descriptor
1171 * last sg NULL
1172 *
1173 * Besides, the descriptor queue might be empty or not. We process all these
1174 * cases carefully.
1175 */
1176static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1177{
1178 struct idmac_channel *ichan = dev_id;
1179 unsigned int chan_id = ichan->dma_chan.chan_id;
1180 struct scatterlist **sg, *sgnext, *sgnew = NULL;
1181 /* Next transfer descriptor */
1182 struct idmac_tx_desc *desc = NULL, *descnew;
1183 dma_async_tx_callback callback;
1184 void *callback_param;
1185 bool done = false;
1186 u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY),
1187 ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY),
1188 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1189
1190 /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
1191
1192 pr_debug("IDMAC irq %d\n", irq);
1193 /* Other interrupts do not interfere with this channel */
1194 spin_lock(&ichan->lock);
1195
1196 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1197 ((curbuf >> chan_id) & 1) == ichan->active_buffer)) {
1198 int i = 100;
1199
1200 /* This doesn't help. See comment in ipu_disable_channel() */
1201 while (--i) {
1202 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1203 if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
1204 break;
1205 cpu_relax();
1206 }
1207
1208 if (!i) {
1209 spin_unlock(&ichan->lock);
1210 dev_dbg(ichan->dma_chan.device->dev,
1211 "IRQ on active buffer on channel %x, active "
1212 "%d, ready %x, %x, current %x!\n", chan_id,
1213 ichan->active_buffer, ready0, ready1, curbuf);
1214 return IRQ_NONE;
1215 }
1216 }
1217
1218 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1219 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1220 )) {
1221 spin_unlock(&ichan->lock);
1222 dev_dbg(ichan->dma_chan.device->dev,
1223 "IRQ with active buffer still ready on channel %x, "
1224 "active %d, ready %x, %x!\n", chan_id,
1225 ichan->active_buffer, ready0, ready1);
1226 return IRQ_NONE;
1227 }
1228
1229 if (unlikely(list_empty(&ichan->queue))) {
1230 spin_unlock(&ichan->lock);
1231 dev_err(ichan->dma_chan.device->dev,
1232 "IRQ without queued buffers on channel %x, active %d, "
1233 "ready %x, %x!\n", chan_id,
1234 ichan->active_buffer, ready0, ready1);
1235 return IRQ_NONE;
1236 }
1237
1238 /*
1239 * active_buffer is a software flag: it shows which buffer we are
1240 * currently expecting back from the hardware; the IDMAC should
1241 * already be processing the other buffer
1242 */
1243 sg = &ichan->sg[ichan->active_buffer];
1244 sgnext = ichan->sg[!ichan->active_buffer];
1245
1246 /*
1247 * if sgnext == NULL sg must be the last element in a scatterlist and
1248 * queue must be empty
1249 */
1250 if (unlikely(!sgnext)) {
1251 if (unlikely(sg_next(*sg))) {
1252 dev_err(ichan->dma_chan.device->dev,
1253 "Broken buffer-update locking on channel %x!\n",
1254 chan_id);
1255 /* We'll let the user catch up */
1256 } else {
1257 /* Underrun */
1258 ipu_ic_disable_task(&ipu_data, chan_id);
1259 dev_dbg(ichan->dma_chan.device->dev,
1260 "Underrun on channel %x\n", chan_id);
1261 ichan->status = IPU_CHANNEL_READY;
1262 /* Continue to check for complete descriptor */
1263 }
1264 }
1265
1266 desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
1267
1268 /* First calculate and submit the next sg element */
1269 if (likely(sgnext))
1270 sgnew = sg_next(sgnext);
1271
1272 if (unlikely(!sgnew)) {
1273 /* Start a new scatterlist, if any queued */
1274 if (likely(desc->list.next != &ichan->queue)) {
1275 descnew = list_entry(desc->list.next,
1276 struct idmac_tx_desc, list);
1277 sgnew = &descnew->sg[0];
1278 }
1279 }
1280
1281 if (unlikely(!sg_next(*sg)) || !sgnext) {
1282 /*
1283 * Last element in scatterlist done, remove from the queue,
1284 * _init for debugging
1285 */
1286 list_del_init(&desc->list);
1287 done = true;
1288 }
1289
1290 *sg = sgnew;
1291
1292 if (likely(sgnew)) {
1293 int ret;
1294
1295 ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer,
1296 sg_dma_address(*sg));
1297 if (ret < 0)
1298 dev_err(ichan->dma_chan.device->dev,
1299 "Failed to update buffer on channel %x buffer %d!\n",
1300 chan_id, ichan->active_buffer);
1301 else
1302 ipu_select_buffer(chan_id, ichan->active_buffer);
1303 }
1304
1305 /* Flip the active buffer - even if update above failed */
1306 ichan->active_buffer = !ichan->active_buffer;
1307 if (done)
1308 ichan->completed = desc->txd.cookie;
1309
1310 callback = desc->txd.callback;
1311 callback_param = desc->txd.callback_param;
1312
1313 spin_unlock(&ichan->lock);
1314
1315 if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
1316 callback(callback_param);
1317
1318 return IRQ_HANDLED;
1319}
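
In the steady state the handler above therefore ping-pongs between the two hardware buffers: the buffer that just completed is reloaded with the next scatterlist entry and re-armed, and then the software flag flips. A stripped-down sketch of that rotation (illustrative only; it omits the descriptor-queue handling and the error paths of the real handler):

	/* Hypothetical steady-state EOF handling for one channel */
	struct scatterlist *next = sg_next(ichan->sg[!ichan->active_buffer]);

	ichan->sg[ichan->active_buffer] = next;
	if (next && !ipu_update_channel_buffer(chan_id, ichan->active_buffer,
					       sg_dma_address(next)))
		ipu_select_buffer(chan_id, ichan->active_buffer);

	ichan->active_buffer = !ichan->active_buffer;
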
1320
1321static void ipu_gc_tasklet(unsigned long arg)
1322{
1323 struct ipu *ipu = (struct ipu *)arg;
1324 int i;
1325
1326 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1327 struct idmac_channel *ichan = ipu->channel + i;
1328 struct idmac_tx_desc *desc;
1329 unsigned long flags;
1330 int j;
1331
1332 for (j = 0; j < ichan->n_tx_desc; j++) {
1333 desc = ichan->desc + j;
1334 spin_lock_irqsave(&ichan->lock, flags);
1335 if (async_tx_test_ack(&desc->txd)) {
1336 list_move(&desc->list, &ichan->free_list);
1337 async_tx_clear_ack(&desc->txd);
1338 }
1339 spin_unlock_irqrestore(&ichan->lock, flags);
1340 }
1341 }
1342}
1343
1344/*
1345 * At the time the .device_alloc_chan_resources() method is called, we cannot
1346 * know whether the client will accept the channel. Thus we can only check
1347 * whether we are able to satisfy the client's request; the only real criterion
1348 * for whether the client has accepted our offer is the client_count. That's why
1349 * we perform the rest of our allocation tasks on the first call to this function.
1350 */
1351static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
1352 struct scatterlist *sgl, unsigned int sg_len,
1353 enum dma_data_direction direction, unsigned long tx_flags)
1354{
1355 struct idmac_channel *ichan = to_idmac_chan(chan);
1356 struct idmac_tx_desc *desc = NULL;
1357 struct dma_async_tx_descriptor *txd = NULL;
1358 unsigned long flags;
1359
1360 /* We can only handle these three channels so far */
1361 if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 &&
1362 ichan->dma_chan.chan_id != IDMAC_IC_7)
1363 return NULL;
1364
1365 if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
1366 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
1367 return NULL;
1368 }
1369
1370 mutex_lock(&ichan->chan_mutex);
1371
1372 spin_lock_irqsave(&ichan->lock, flags);
1373 if (!list_empty(&ichan->free_list)) {
1374 desc = list_entry(ichan->free_list.next,
1375 struct idmac_tx_desc, list);
1376
1377 list_del_init(&desc->list);
1378
1379 desc->sg_len = sg_len;
1380 desc->sg = sgl;
1381 txd = &desc->txd;
1382 txd->flags = tx_flags;
1383 }
1384 spin_unlock_irqrestore(&ichan->lock, flags);
1385
1386 mutex_unlock(&ichan->chan_mutex);
1387
1388 tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
1389
1390 return txd;
1391}
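
For context, a client of this channel would normally obtain a descriptor through the device's prep op and then submit it; a hedged usage sketch follows (the callback, context and scatterlist names are placeholders, not taken from this patch):

	/* Hypothetical client-side use of the slave-sg path above */
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_slave_sg(chan, sg, sg_len,
						  DMA_FROM_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!txd)
		return -EIO;			/* no free descriptor available */

	txd->callback = my_dma_done;		/* placeholder completion callback */
	txd->callback_param = my_context;
	cookie = txd->tx_submit(txd);		/* lands in idmac_tx_submit() */
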
1392
1393/* Re-select the current buffer and re-activate the channel */
1394static void idmac_issue_pending(struct dma_chan *chan)
1395{
1396 struct idmac_channel *ichan = to_idmac_chan(chan);
1397 struct idmac *idmac = to_idmac(chan->device);
1398 struct ipu *ipu = to_ipu(idmac);
1399 unsigned long flags;
1400
1401 /* This is not always needed, but doesn't hurt either */
1402 spin_lock_irqsave(&ipu->lock, flags);
1403 ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer);
1404 spin_unlock_irqrestore(&ipu->lock, flags);
1405
1406 /*
1407 * Might need to perform some parts of initialisation from
1408 * ipu_enable_channel(), but not all, we do not want to reset to buffer
1409 * 0, don't need to set priority again either, but re-enabling the task
1410 * and the channel might be a good idea.
1411 */
1412}
1413
1414static void __idmac_terminate_all(struct dma_chan *chan)
1415{
1416 struct idmac_channel *ichan = to_idmac_chan(chan);
1417 struct idmac *idmac = to_idmac(chan->device);
1418 unsigned long flags;
1419 int i;
1420
1421 ipu_disable_channel(idmac, ichan,
1422 ichan->status >= IPU_CHANNEL_ENABLED);
1423
1424 tasklet_disable(&to_ipu(idmac)->tasklet);
1425
1426 /* ichan->queue is modified in ISR, have to spinlock */
1427 spin_lock_irqsave(&ichan->lock, flags);
1428 list_splice_init(&ichan->queue, &ichan->free_list);
1429
1430 if (ichan->desc)
1431 for (i = 0; i < ichan->n_tx_desc; i++) {
1432 struct idmac_tx_desc *desc = ichan->desc + i;
1433 if (list_empty(&desc->list))
1434 /* Descriptor was prepared, but not submitted */
1435 list_add(&desc->list,
1436 &ichan->free_list);
1437
1438 async_tx_clear_ack(&desc->txd);
1439 }
1440
1441 ichan->sg[0] = NULL;
1442 ichan->sg[1] = NULL;
1443 spin_unlock_irqrestore(&ichan->lock, flags);
1444
1445 tasklet_enable(&to_ipu(idmac)->tasklet);
1446
1447 ichan->status = IPU_CHANNEL_INITIALIZED;
1448}
1449
1450static void idmac_terminate_all(struct dma_chan *chan)
1451{
1452 struct idmac_channel *ichan = to_idmac_chan(chan);
1453
1454 mutex_lock(&ichan->chan_mutex);
1455
1456 __idmac_terminate_all(chan);
1457
1458 mutex_unlock(&ichan->chan_mutex);
1459}
1460
1461static int idmac_alloc_chan_resources(struct dma_chan *chan)
1462{
1463 struct idmac_channel *ichan = to_idmac_chan(chan);
1464 struct idmac *idmac = to_idmac(chan->device);
1465 int ret;
1466
1467 /* dmaengine.c now guarantees to only offer free channels */
1468 BUG_ON(chan->client_count > 1);
1469 WARN_ON(ichan->status != IPU_CHANNEL_FREE);
1470
1471 chan->cookie = 1;
1472 ichan->completed = -ENXIO;
1473
1474 ret = ipu_irq_map(ichan->dma_chan.chan_id);
1475 if (ret < 0)
1476 goto eimap;
1477
1478 ichan->eof_irq = ret;
1479 ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
1480 ichan->eof_name, ichan);
1481 if (ret < 0)
1482 goto erirq;
1483
1484 ret = ipu_init_channel(idmac, ichan);
1485 if (ret < 0)
1486 goto eichan;
1487
1488 ichan->status = IPU_CHANNEL_INITIALIZED;
1489
1490 dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n",
1491 ichan->dma_chan.chan_id, ichan->eof_irq);
1492
1493 return ret;
1494
1495eichan:
1496 free_irq(ichan->eof_irq, ichan);
1497erirq:
1498 ipu_irq_unmap(ichan->dma_chan.chan_id);
1499eimap:
1500 return ret;
1501}
1502
1503static void idmac_free_chan_resources(struct dma_chan *chan)
1504{
1505 struct idmac_channel *ichan = to_idmac_chan(chan);
1506 struct idmac *idmac = to_idmac(chan->device);
1507
1508 mutex_lock(&ichan->chan_mutex);
1509
1510 __idmac_terminate_all(chan);
1511
1512 if (ichan->status > IPU_CHANNEL_FREE) {
1513 free_irq(ichan->eof_irq, ichan);
1514 ipu_irq_unmap(ichan->dma_chan.chan_id);
1515 }
1516
1517 ichan->status = IPU_CHANNEL_FREE;
1518
1519 ipu_uninit_channel(idmac, ichan);
1520
1521 mutex_unlock(&ichan->chan_mutex);
1522
1523 tasklet_schedule(&to_ipu(idmac)->tasklet);
1524}
1525
1526static enum dma_status idmac_is_tx_complete(struct dma_chan *chan,
1527 dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
1528{
1529 struct idmac_channel *ichan = to_idmac_chan(chan);
1530
1531 if (done)
1532 *done = ichan->completed;
1533 if (used)
1534 *used = chan->cookie;
1535 if (cookie != chan->cookie)
1536 return DMA_ERROR;
1537 return DMA_SUCCESS;
1538}
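
Clients of this era reach the op above through the generic dmaengine wrapper rather than calling it directly; a hedged sketch, where cookie is the value previously returned by tx_submit():

	enum dma_status status;
	dma_cookie_t last_completed, last_used;

	status = dma_async_is_tx_complete(chan, cookie,
					  &last_completed, &last_used);
	/*
	 * For this driver, last_completed mirrors ichan->completed, which the
	 * EOF interrupt handler updates when a descriptor finishes.
	 */
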
1539
1540static int __init ipu_idmac_init(struct ipu *ipu)
1541{
1542 struct idmac *idmac = &ipu->idmac;
1543 struct dma_device *dma = &idmac->dma;
1544 int i;
1545
1546 dma_cap_set(DMA_SLAVE, dma->cap_mask);
1547 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
1548
1549 /* Compulsory common fields */
1550 dma->dev = ipu->dev;
1551 dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
1552 dma->device_free_chan_resources = idmac_free_chan_resources;
1553 dma->device_is_tx_complete = idmac_is_tx_complete;
1554 dma->device_issue_pending = idmac_issue_pending;
1555
1556 /* Fields compulsory for DMA_SLAVE */
1557 dma->device_prep_slave_sg = idmac_prep_slave_sg;
1558 dma->device_terminate_all = idmac_terminate_all;
1559
1560 INIT_LIST_HEAD(&dma->channels);
1561 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1562 struct idmac_channel *ichan = ipu->channel + i;
1563 struct dma_chan *dma_chan = &ichan->dma_chan;
1564
1565 spin_lock_init(&ichan->lock);
1566 mutex_init(&ichan->chan_mutex);
1567
1568 ichan->status = IPU_CHANNEL_FREE;
1569 ichan->sec_chan_en = false;
1570 ichan->completed = -ENXIO;
1571 snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
1572
1573 dma_chan->device = &idmac->dma;
1574 dma_chan->cookie = 1;
1575 dma_chan->chan_id = i;
1576 list_add_tail(&ichan->dma_chan.device_node, &dma->channels);
1577 }
1578
1579 idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
1580
1581 return dma_async_device_register(&idmac->dma);
1582}
1583
1584static void ipu_idmac_exit(struct ipu *ipu)
1585{
1586 int i;
1587 struct idmac *idmac = &ipu->idmac;
1588
1589 for (i = 0; i < IPU_CHANNELS_NUM; i++) {
1590 struct idmac_channel *ichan = ipu->channel + i;
1591
1592 idmac_terminate_all(&ichan->dma_chan);
1593 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
1594 }
1595
1596 dma_async_device_unregister(&idmac->dma);
1597}
1598
1599/*****************************************************************************
1600 * IPU common probe / remove
1601 */
1602
1603static int ipu_probe(struct platform_device *pdev)
1604{
1605 struct ipu_platform_data *pdata = pdev->dev.platform_data;
1606 struct resource *mem_ipu, *mem_ic;
1607 int ret;
1608
1609 spin_lock_init(&ipu_data.lock);
1610
1611 mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1612 mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1613 if (!pdata || !mem_ipu || !mem_ic)
1614 return -EINVAL;
1615
1616 ipu_data.dev = &pdev->dev;
1617
1618 platform_set_drvdata(pdev, &ipu_data);
1619
1620 ret = platform_get_irq(pdev, 0);
1621 if (ret < 0)
1622 goto err_noirq;
1623
1624 ipu_data.irq_fn = ret;
1625 ret = platform_get_irq(pdev, 1);
1626 if (ret < 0)
1627 goto err_noirq;
1628
1629 ipu_data.irq_err = ret;
1630 ipu_data.irq_base = pdata->irq_base;
1631
1632 dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n",
1633 ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
1634
1635 /* Remap IPU common registers */
1636 ipu_data.reg_ipu = ioremap(mem_ipu->start,
1637 mem_ipu->end - mem_ipu->start + 1);
1638 if (!ipu_data.reg_ipu) {
1639 ret = -ENOMEM;
1640 goto err_ioremap_ipu;
1641 }
1642
1643 /* Remap Image Converter and Image DMA Controller registers */
1644 ipu_data.reg_ic = ioremap(mem_ic->start,
1645 mem_ic->end - mem_ic->start + 1);
1646 if (!ipu_data.reg_ic) {
1647 ret = -ENOMEM;
1648 goto err_ioremap_ic;
1649 }
1650
1651 /* Get IPU clock */
1652 ipu_data.ipu_clk = clk_get(&pdev->dev, "ipu_clk");
1653 if (IS_ERR(ipu_data.ipu_clk)) {
1654 ret = PTR_ERR(ipu_data.ipu_clk);
1655 goto err_clk_get;
1656 }
1657
1658 /* Make sure IPU HSP clock is running */
1659 clk_enable(ipu_data.ipu_clk);
1660
1661 /* Disable all interrupts */
1662 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
1663 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
1664 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
1665 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
1666 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
1667
1668 dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
1669 (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
1670
1671 ret = ipu_irq_attach_irq(&ipu_data, pdev);
1672 if (ret < 0)
1673 goto err_attach_irq;
1674
1675 /* Initialize DMA engine */
1676 ret = ipu_idmac_init(&ipu_data);
1677 if (ret < 0)
1678 goto err_idmac_init;
1679
1680 tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data);
1681
1682 ipu_data.dev = &pdev->dev;
1683
1684 dev_dbg(ipu_data.dev, "IPU initialized\n");
1685
1686 return 0;
1687
1688err_idmac_init:
1689err_attach_irq:
1690 ipu_irq_detach_irq(&ipu_data, pdev);
1691 clk_disable(ipu_data.ipu_clk);
1692 clk_put(ipu_data.ipu_clk);
1693err_clk_get:
1694 iounmap(ipu_data.reg_ic);
1695err_ioremap_ic:
1696 iounmap(ipu_data.reg_ipu);
1697err_ioremap_ipu:
1698err_noirq:
1699 dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
1700 return ret;
1701}
1702
1703static int ipu_remove(struct platform_device *pdev)
1704{
1705 struct ipu *ipu = platform_get_drvdata(pdev);
1706
1707 ipu_idmac_exit(ipu);
1708 ipu_irq_detach_irq(ipu, pdev);
1709 clk_disable(ipu->ipu_clk);
1710 clk_put(ipu->ipu_clk);
1711 iounmap(ipu->reg_ic);
1712 iounmap(ipu->reg_ipu);
1713 tasklet_kill(&ipu->tasklet);
1714 platform_set_drvdata(pdev, NULL);
1715
1716 return 0;
1717}
1718
1719/*
1720 * We need two MEM resources - with IPU-common and Image Converter registers,
1721 * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
1722 */
1723static struct platform_driver ipu_platform_driver = {
1724 .driver = {
1725 .name = "ipu-core",
1726 .owner = THIS_MODULE,
1727 },
1728 .remove = ipu_remove,
1729};
1730
1731static int __init ipu_init(void)
1732{
1733 return platform_driver_probe(&ipu_platform_driver, ipu_probe);
1734}
1735subsys_initcall(ipu_init);
1736
1737MODULE_DESCRIPTION("IPU core driver");
1738MODULE_LICENSE("GPL v2");
1739MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
1740MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
new file mode 100644
index 000000000000..545cf11a94ab
--- /dev/null
+++ b/drivers/dma/ipu/ipu_intern.h
@@ -0,0 +1,176 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _IPU_INTERN_H_
13#define _IPU_INTERN_H_
14
15#include <linux/dmaengine.h>
16#include <linux/platform_device.h>
17#include <linux/interrupt.h>
18
19/* IPU Common registers */
20#define IPU_CONF 0x00
21#define IPU_CHA_BUF0_RDY 0x04
22#define IPU_CHA_BUF1_RDY 0x08
23#define IPU_CHA_DB_MODE_SEL 0x0C
24#define IPU_CHA_CUR_BUF 0x10
25#define IPU_FS_PROC_FLOW 0x14
26#define IPU_FS_DISP_FLOW 0x18
27#define IPU_TASKS_STAT 0x1C
28#define IPU_IMA_ADDR 0x20
29#define IPU_IMA_DATA 0x24
30#define IPU_INT_CTRL_1 0x28
31#define IPU_INT_CTRL_2 0x2C
32#define IPU_INT_CTRL_3 0x30
33#define IPU_INT_CTRL_4 0x34
34#define IPU_INT_CTRL_5 0x38
35#define IPU_INT_STAT_1 0x3C
36#define IPU_INT_STAT_2 0x40
37#define IPU_INT_STAT_3 0x44
38#define IPU_INT_STAT_4 0x48
39#define IPU_INT_STAT_5 0x4C
40#define IPU_BRK_CTRL_1 0x50
41#define IPU_BRK_CTRL_2 0x54
42#define IPU_BRK_STAT 0x58
43#define IPU_DIAGB_CTRL 0x5C
44
45/* IPU_CONF Register bits */
46#define IPU_CONF_CSI_EN 0x00000001
47#define IPU_CONF_IC_EN 0x00000002
48#define IPU_CONF_ROT_EN 0x00000004
49#define IPU_CONF_PF_EN 0x00000008
50#define IPU_CONF_SDC_EN 0x00000010
51#define IPU_CONF_ADC_EN 0x00000020
52#define IPU_CONF_DI_EN 0x00000040
53#define IPU_CONF_DU_EN 0x00000080
54#define IPU_CONF_PXL_ENDIAN 0x00000100
55
56/* Image Converter Registers */
57#define IC_CONF 0x88
58#define IC_PRP_ENC_RSC 0x8C
59#define IC_PRP_VF_RSC 0x90
60#define IC_PP_RSC 0x94
61#define IC_CMBP_1 0x98
62#define IC_CMBP_2 0x9C
63#define PF_CONF 0xA0
64#define IDMAC_CONF 0xA4
65#define IDMAC_CHA_EN 0xA8
66#define IDMAC_CHA_PRI 0xAC
67#define IDMAC_CHA_BUSY 0xB0
68
69/* Image Converter Register bits */
70#define IC_CONF_PRPENC_EN 0x00000001
71#define IC_CONF_PRPENC_CSC1 0x00000002
72#define IC_CONF_PRPENC_ROT_EN 0x00000004
73#define IC_CONF_PRPVF_EN 0x00000100
74#define IC_CONF_PRPVF_CSC1 0x00000200
75#define IC_CONF_PRPVF_CSC2 0x00000400
76#define IC_CONF_PRPVF_CMB 0x00000800
77#define IC_CONF_PRPVF_ROT_EN 0x00001000
78#define IC_CONF_PP_EN 0x00010000
79#define IC_CONF_PP_CSC1 0x00020000
80#define IC_CONF_PP_CSC2 0x00040000
81#define IC_CONF_PP_CMB 0x00080000
82#define IC_CONF_PP_ROT_EN 0x00100000
83#define IC_CONF_IC_GLB_LOC_A 0x10000000
84#define IC_CONF_KEY_COLOR_EN 0x20000000
85#define IC_CONF_RWS_EN 0x40000000
86#define IC_CONF_CSI_MEM_WR_EN 0x80000000
87
88#define IDMA_CHAN_INVALID 0x000000FF
89#define IDMA_IC_0 0x00000001
90#define IDMA_IC_1 0x00000002
91#define IDMA_IC_2 0x00000004
92#define IDMA_IC_3 0x00000008
93#define IDMA_IC_4 0x00000010
94#define IDMA_IC_5 0x00000020
95#define IDMA_IC_6 0x00000040
96#define IDMA_IC_7 0x00000080
97#define IDMA_IC_8 0x00000100
98#define IDMA_IC_9 0x00000200
99#define IDMA_IC_10 0x00000400
100#define IDMA_IC_11 0x00000800
101#define IDMA_IC_12 0x00001000
102#define IDMA_IC_13 0x00002000
103#define IDMA_SDC_BG 0x00004000
104#define IDMA_SDC_FG 0x00008000
105#define IDMA_SDC_MASK 0x00010000
106#define IDMA_SDC_PARTIAL 0x00020000
107#define IDMA_ADC_SYS1_WR 0x00040000
108#define IDMA_ADC_SYS2_WR 0x00080000
109#define IDMA_ADC_SYS1_CMD 0x00100000
110#define IDMA_ADC_SYS2_CMD 0x00200000
111#define IDMA_ADC_SYS1_RD 0x00400000
112#define IDMA_ADC_SYS2_RD 0x00800000
113#define IDMA_PF_QP 0x01000000
114#define IDMA_PF_BSP 0x02000000
115#define IDMA_PF_Y_IN 0x04000000
116#define IDMA_PF_U_IN 0x08000000
117#define IDMA_PF_V_IN 0x10000000
118#define IDMA_PF_Y_OUT 0x20000000
119#define IDMA_PF_U_OUT 0x40000000
120#define IDMA_PF_V_OUT 0x80000000
121
122#define TSTAT_PF_H264_PAUSE 0x00000001
123#define TSTAT_CSI2MEM_MASK 0x0000000C
124#define TSTAT_CSI2MEM_OFFSET 2
125#define TSTAT_VF_MASK 0x00000600
126#define TSTAT_VF_OFFSET 9
127#define TSTAT_VF_ROT_MASK 0x000C0000
128#define TSTAT_VF_ROT_OFFSET 18
129#define TSTAT_ENC_MASK 0x00000180
130#define TSTAT_ENC_OFFSET 7
131#define TSTAT_ENC_ROT_MASK 0x00030000
132#define TSTAT_ENC_ROT_OFFSET 16
133#define TSTAT_PP_MASK 0x00001800
134#define TSTAT_PP_OFFSET 11
135#define TSTAT_PP_ROT_MASK 0x00300000
136#define TSTAT_PP_ROT_OFFSET 20
137#define TSTAT_PF_MASK 0x00C00000
138#define TSTAT_PF_OFFSET 22
139#define TSTAT_ADCSYS1_MASK 0x03000000
140#define TSTAT_ADCSYS1_OFFSET 24
141#define TSTAT_ADCSYS2_MASK 0x0C000000
142#define TSTAT_ADCSYS2_OFFSET 26
143
144#define TASK_STAT_IDLE 0
145#define TASK_STAT_ACTIVE 1
146#define TASK_STAT_WAIT4READY 2
147
148struct idmac {
149 struct dma_device dma;
150};
151
152struct ipu {
153 void __iomem *reg_ipu;
154 void __iomem *reg_ic;
155 unsigned int irq_fn; /* IPU Function IRQ to the CPU */
156 unsigned int irq_err; /* IPU Error IRQ to the CPU */
157 unsigned int irq_base; /* Beginning of the IPU IRQ range */
158 unsigned long channel_init_mask;
159 spinlock_t lock;
160 struct clk *ipu_clk;
161 struct device *dev;
162 struct idmac idmac;
163 struct idmac_channel channel[IPU_CHANNELS_NUM];
164 struct tasklet_struct tasklet;
165};
166
167#define to_idmac(d) container_of(d, struct idmac, dma)
168
169extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
170extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
171
172extern bool ipu_irq_status(uint32_t irq);
173extern int ipu_irq_map(unsigned int source);
174extern int ipu_irq_unmap(unsigned int source);
175
176#endif
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
new file mode 100644
index 000000000000..83f532cc767f
--- /dev/null
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -0,0 +1,413 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/init.h>
11#include <linux/err.h>
12#include <linux/spinlock.h>
13#include <linux/delay.h>
14#include <linux/clk.h>
15#include <linux/irq.h>
16#include <linux/io.h>
17
18#include <mach/ipu.h>
19
20#include "ipu_intern.h"
21
22/*
23 * Register read / write - shall be inlined by the compiler
24 */
25static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
26{
27 return __raw_readl(ipu->reg_ipu + reg);
28}
29
30static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
31{
32 __raw_writel(value, ipu->reg_ipu + reg);
33}
34
35
36/*
37 * IPU IRQ chip driver
38 */
39
40#define IPU_IRQ_NR_FN_BANKS 3
41#define IPU_IRQ_NR_ERR_BANKS 2
42#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
43
44struct ipu_irq_bank {
45 unsigned int control;
46 unsigned int status;
47 spinlock_t lock;
48 struct ipu *ipu;
49};
50
51static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
52 /* 3 groups of functional interrupts */
53 {
54 .control = IPU_INT_CTRL_1,
55 .status = IPU_INT_STAT_1,
56 }, {
57 .control = IPU_INT_CTRL_2,
58 .status = IPU_INT_STAT_2,
59 }, {
60 .control = IPU_INT_CTRL_3,
61 .status = IPU_INT_STAT_3,
62 },
63 /* 2 groups of error interrupts */
64 {
65 .control = IPU_INT_CTRL_4,
66 .status = IPU_INT_STAT_4,
67 }, {
68 .control = IPU_INT_CTRL_5,
69 .status = IPU_INT_STAT_5,
70 },
71};
72
73struct ipu_irq_map {
74 unsigned int irq;
75 int source;
76 struct ipu_irq_bank *bank;
77 struct ipu *ipu;
78};
79
80static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
81/* Protects allocations from the above array of maps */
82static DEFINE_MUTEX(map_lock);
83/* Protects register accesses and individual mappings */
84static DEFINE_SPINLOCK(bank_lock);
85
86static struct ipu_irq_map *src2map(unsigned int src)
87{
88 int i;
89
90 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
91 if (irq_map[i].source == src)
92 return irq_map + i;
93
94 return NULL;
95}
96
97static void ipu_irq_unmask(unsigned int irq)
98{
99 struct ipu_irq_map *map = get_irq_chip_data(irq);
100 struct ipu_irq_bank *bank;
101 uint32_t reg;
102 unsigned long lock_flags;
103
104 spin_lock_irqsave(&bank_lock, lock_flags);
105
106 bank = map->bank;
107 if (!bank) {
108 spin_unlock_irqrestore(&bank_lock, lock_flags);
109 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
110 return;
111 }
112
113 reg = ipu_read_reg(bank->ipu, bank->control);
114 reg |= (1UL << (map->source & 31));
115 ipu_write_reg(bank->ipu, reg, bank->control);
116
117 spin_unlock_irqrestore(&bank_lock, lock_flags);
118}
119
120static void ipu_irq_mask(unsigned int irq)
121{
122 struct ipu_irq_map *map = get_irq_chip_data(irq);
123 struct ipu_irq_bank *bank;
124 uint32_t reg;
125 unsigned long lock_flags;
126
127 spin_lock_irqsave(&bank_lock, lock_flags);
128
129 bank = map->bank;
130 if (!bank) {
131 spin_unlock_irqrestore(&bank_lock, lock_flags);
132 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
133 return;
134 }
135
136 reg = ipu_read_reg(bank->ipu, bank->control);
137 reg &= ~(1UL << (map->source & 31));
138 ipu_write_reg(bank->ipu, reg, bank->control);
139
140 spin_unlock_irqrestore(&bank_lock, lock_flags);
141}
142
143static void ipu_irq_ack(unsigned int irq)
144{
145 struct ipu_irq_map *map = get_irq_chip_data(irq);
146 struct ipu_irq_bank *bank;
147 unsigned long lock_flags;
148
149 spin_lock_irqsave(&bank_lock, lock_flags);
150
151 bank = map->bank;
152 if (!bank) {
153 spin_unlock_irqrestore(&bank_lock, lock_flags);
154 pr_err("IPU: %s(%u) - unmapped!\n", __func__, irq);
155 return;
156 }
157
158 ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
159 spin_unlock_irqrestore(&bank_lock, lock_flags);
160}
161
162/**
163 * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
164 * @irq: interrupt line to get status for.
165 * @return: true if the interrupt is pending/asserted or false if the
166 * interrupt is not pending.
167 */
168bool ipu_irq_status(unsigned int irq)
169{
170 struct ipu_irq_map *map = get_irq_chip_data(irq);
171 struct ipu_irq_bank *bank;
172 unsigned long lock_flags;
173 bool ret;
174
175 spin_lock_irqsave(&bank_lock, lock_flags);
176 bank = map->bank;
177 ret = bank && ipu_read_reg(bank->ipu, bank->status) &
178 (1UL << (map->source & 31));
179 spin_unlock_irqrestore(&bank_lock, lock_flags);
180
181 return ret;
182}
183
184/**
185 * ipu_irq_map() - map an IPU interrupt source to an IRQ number
186 * @source: interrupt source bit position (see below)
187 * @return: mapped IRQ number or negative error code
188 *
189 * The source parameter needs further explanation. On i.MX31 the IPU has 137 IRQ
190 * sources, broken down into 5 32-bit registers as 32, 32, 24, 32 and 17 bits.
191 * However, the source argument of this function is not the sequence number of
192 * the possible IRQ, but rather its bit position. So, the first interrupt in the
193 * fourth register has source number 96, not 88. This makes calculations easier,
194 * and also provides forward compatibility with any future IPU implementations
195 * with any interrupt bit assignments.
196 */
197int ipu_irq_map(unsigned int source)
198{
199 int i, ret = -ENOMEM;
200 struct ipu_irq_map *map;
201
202 might_sleep();
203
204 mutex_lock(&map_lock);
205 map = src2map(source);
206 if (map) {
207 pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
208 ret = -EBUSY;
209 goto out;
210 }
211
212 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
213 if (irq_map[i].source < 0) {
214 unsigned long lock_flags;
215
216 spin_lock_irqsave(&bank_lock, lock_flags);
217 irq_map[i].source = source;
218 irq_map[i].bank = irq_bank + source / 32;
219 spin_unlock_irqrestore(&bank_lock, lock_flags);
220
221 ret = irq_map[i].irq;
222 pr_debug("IPU: mapped source %u to IRQ %u\n",
223 source, ret);
224 break;
225 }
226 }
227out:
228 mutex_unlock(&map_lock);
229
230 if (ret < 0)
231 pr_err("IPU: couldn't map source %u: %d\n", source, ret);
232
233 return ret;
234}
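
Because the source number is a raw bit position, the bank and the bit are recovered with plain arithmetic, exactly as the assignments above do; a worked sketch using the i.MX31 layout from the comment:

	unsigned int bank = source / 32;	/* source 96 -> bank 3, i.e. IPU_INT_CTRL_4/STAT_4 */
	unsigned int bit  = source & 31;	/* source 96 -> bit 0 of that bank */
	u32 mask = 1UL << bit;			/* mask applied to the bank's control/status regs */
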
235
236/**
237 * ipu_irq_unmap() - unmap an IPU interrupt source
238 * @source: interrupt source bit position (see ipu_irq_map())
239 * @return: 0 or negative error code
240 */
241int ipu_irq_unmap(unsigned int source)
242{
243 int i, ret = -EINVAL;
244
245 might_sleep();
246
247 mutex_lock(&map_lock);
248 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
249 if (irq_map[i].source == source) {
250 unsigned long lock_flags;
251
252 pr_debug("IPU: unmapped source %u from IRQ %u\n",
253 source, irq_map[i].irq);
254
255 spin_lock_irqsave(&bank_lock, lock_flags);
256 irq_map[i].source = -EINVAL;
257 irq_map[i].bank = NULL;
258 spin_unlock_irqrestore(&bank_lock, lock_flags);
259
260 ret = 0;
261 break;
262 }
263 }
264 mutex_unlock(&map_lock);
265
266 return ret;
267}
268
269/* Chained IRQ handler for IPU error interrupt */
270static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
271{
272 struct ipu *ipu = get_irq_data(irq);
273 u32 status;
274 int i, line;
275
276 for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
277 struct ipu_irq_bank *bank = irq_bank + i;
278
279 spin_lock(&bank_lock);
280 status = ipu_read_reg(ipu, bank->status);
281 /*
282 * Don't think we have to clear all interrupts here; they will
283 * be acked by ->handle_irq() (handle_level_irq). However, we
284 * might want to clear unhandled interrupts after the loop...
285 */
286 status &= ipu_read_reg(ipu, bank->control);
287 spin_unlock(&bank_lock);
288 while ((line = ffs(status))) {
289 struct ipu_irq_map *map;
290
291 line--;
292 status &= ~(1UL << line);
293
294 spin_lock(&bank_lock);
295 map = src2map(32 * i + line);
296 if (map)
297 irq = map->irq;
298 spin_unlock(&bank_lock);
299
300 if (!map) {
301 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
302 line, i);
303 continue;
304 }
305 generic_handle_irq(irq);
306 }
307 }
308}
309
310/* Chained IRQ handler for IPU function interrupt */
311static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
312{
313 struct ipu *ipu = get_irq_data(irq);
314 u32 status;
315 int i, line;
316
317 for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
318 struct ipu_irq_bank *bank = irq_bank + i;
319
320 spin_lock(&bank_lock);
321 status = ipu_read_reg(ipu, bank->status);
322 /* Not clearing all interrupts, see above */
323 status &= ipu_read_reg(ipu, bank->control);
324 spin_unlock(&bank_lock);
325 while ((line = ffs(status))) {
326 struct ipu_irq_map *map;
327
328 line--;
329 status &= ~(1UL << line);
330
331 spin_lock(&bank_lock);
332 map = src2map(32 * i + line);
333 if (map)
334 irq = map->irq;
335 spin_unlock(&bank_lock);
336
337 if (!map) {
338 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
339 line, i);
340 continue;
341 }
342 generic_handle_irq(irq);
343 }
344 }
345}
346
347static struct irq_chip ipu_irq_chip = {
348 .name = "ipu_irq",
349 .ack = ipu_irq_ack,
350 .mask = ipu_irq_mask,
351 .unmask = ipu_irq_unmask,
352};
353
354/* Install the IRQ handler */
355int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
356{
357 struct ipu_platform_data *pdata = dev->dev.platform_data;
358 unsigned int irq, irq_base, i;
359
360 irq_base = pdata->irq_base;
361
362 for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
363 irq_bank[i].ipu = ipu;
364
365 for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
366 int ret;
367
368 irq = irq_base + i;
369 ret = set_irq_chip(irq, &ipu_irq_chip);
370 if (ret < 0)
371 return ret;
372 ret = set_irq_chip_data(irq, irq_map + i);
373 if (ret < 0)
374 return ret;
375 irq_map[i].ipu = ipu;
376 irq_map[i].irq = irq;
377 irq_map[i].source = -EINVAL;
378 set_irq_handler(irq, handle_level_irq);
379#ifdef CONFIG_ARM
380 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
381#endif
382 }
383
384 set_irq_data(ipu->irq_fn, ipu);
385 set_irq_chained_handler(ipu->irq_fn, ipu_irq_fn);
386
387 set_irq_data(ipu->irq_err, ipu);
388 set_irq_chained_handler(ipu->irq_err, ipu_irq_err);
389
390 return 0;
391}
392
393void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
394{
395 struct ipu_platform_data *pdata = dev->dev.platform_data;
396 unsigned int irq, irq_base;
397
398 irq_base = pdata->irq_base;
399
400 set_irq_chained_handler(ipu->irq_fn, NULL);
401 set_irq_data(ipu->irq_fn, NULL);
402
403 set_irq_chained_handler(ipu->irq_err, NULL);
404 set_irq_data(ipu->irq_err, NULL);
405
406 for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
407#ifdef CONFIG_ARM
408 set_irq_flags(irq, 0);
409#endif
410 set_irq_chip(irq, NULL);
411 set_irq_chip_data(irq, NULL);
412 }
413}
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 13946ebd77d6..b4704e150b28 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -576,7 +576,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
576{ 576{
577 int size = 0; 577 int size = 0;
578 if (!pos) 578 if (!pos)
579 size = sprintf(buffer, "%s\n", image_type); 579 size = scnprintf(buffer, count, "%s\n", image_type);
580 return size; 580 return size;
581} 581}
582 582
@@ -648,7 +648,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
648 int size = 0; 648 int size = 0;
649 if (!pos) { 649 if (!pos) {
650 spin_lock(&rbu_data.lock); 650 spin_lock(&rbu_data.lock);
651 size = sprintf(buffer, "%lu\n", rbu_data.packetsize); 651 size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
652 spin_unlock(&rbu_data.lock); 652 spin_unlock(&rbu_data.lock);
653 } 653 }
654 return size; 654 return size;
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 8b24d784db93..3e7f4e06386e 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -217,8 +217,10 @@ static int __devinit max7301_probe(struct spi_device *spi)
217 int i, ret; 217 int i, ret;
218 218
219 pdata = spi->dev.platform_data; 219 pdata = spi->dev.platform_data;
220 if (!pdata || !pdata->base) 220 if (!pdata || !pdata->base) {
221 return -ENODEV; 221 dev_dbg(&spi->dev, "incorrect or missing platform data\n");
222 return -EINVAL;
223 }
222 224
223 /* 225 /*
224 * bits_per_word cannot be configured in platform data 226 * bits_per_word cannot be configured in platform data
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 55ae9a41897a..f7868243af89 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -267,8 +267,10 @@ static int __devinit max732x_probe(struct i2c_client *client,
267 int ret, nr_port; 267 int ret, nr_port;
268 268
269 pdata = client->dev.platform_data; 269 pdata = client->dev.platform_data;
270 if (pdata == NULL) 270 if (pdata == NULL) {
271 return -ENODEV; 271 dev_dbg(&client->dev, "no platform data\n");
272 return -EINVAL;
273 }
272 274
273 chip = kzalloc(sizeof(struct max732x_chip), GFP_KERNEL); 275 chip = kzalloc(sizeof(struct max732x_chip), GFP_KERNEL);
274 if (chip == NULL) 276 if (chip == NULL)
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
index 89c1d222e9d1..f6fae0e50e65 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/mcp23s08.c
@@ -310,8 +310,10 @@ static int mcp23s08_probe(struct spi_device *spi)
310 unsigned base; 310 unsigned base;
311 311
312 pdata = spi->dev.platform_data; 312 pdata = spi->dev.platform_data;
313 if (!pdata || !gpio_is_valid(pdata->base)) 313 if (!pdata || !gpio_is_valid(pdata->base)) {
314 return -ENODEV; 314 dev_dbg(&spi->dev, "invalid or missing platform data\n");
315 return -EINVAL;
316 }
315 317
316 for (addr = 0; addr < 4; addr++) { 318 for (addr = 0; addr < 4; addr++) {
317 if (!pdata->chip[addr].is_present) 319 if (!pdata->chip[addr].is_present)
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 37f35388a2ae..8dc0164bd51e 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -202,8 +202,10 @@ static int __devinit pca953x_probe(struct i2c_client *client,
202 int ret; 202 int ret;
203 203
204 pdata = client->dev.platform_data; 204 pdata = client->dev.platform_data;
205 if (pdata == NULL) 205 if (pdata == NULL) {
206 return -ENODEV; 206 dev_dbg(&client->dev, "no platform data\n");
207 return -EINVAL;
208 }
207 209
208 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); 210 chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
209 if (chip == NULL) 211 if (chip == NULL)
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
index 4bc2070dd4a1..9525724be731 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/pcf857x.c
@@ -188,8 +188,10 @@ static int pcf857x_probe(struct i2c_client *client,
188 int status; 188 int status;
189 189
190 pdata = client->dev.platform_data; 190 pdata = client->dev.platform_data;
191 if (!pdata) 191 if (!pdata) {
192 return -ENODEV; 192 dev_dbg(&client->dev, "no platform data\n");
193 return -EINVAL;
194 }
193 195
194 /* Allocate, initialize, and register this gpio_chip. */ 196 /* Allocate, initialize, and register this gpio_chip. */
195 gpio = kzalloc(sizeof *gpio, GFP_KERNEL); 197 gpio = kzalloc(sizeof *gpio, GFP_KERNEL);
@@ -248,8 +250,10 @@ static int pcf857x_probe(struct i2c_client *client,
248 else 250 else
249 status = i2c_read_le16(client); 251 status = i2c_read_le16(client);
250 252
251 } else 253 } else {
252 status = -ENODEV; 254 dev_dbg(&client->dev, "unsupported number of gpios\n");
255 status = -EINVAL;
256 }
253 257
254 if (status < 0) 258 if (status < 0)
255 goto fail; 259 goto fail;
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d33b8252b58..14796594e5d9 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,10 +33,11 @@
33 33
34#include "drmP.h" 34#include "drmP.h"
35#include <linux/module.h> 35#include <linux/module.h>
36#include <asm/agp.h>
37 36
38#if __OS_HAS_AGP 37#if __OS_HAS_AGP
39 38
39#include <asm/agp.h>
40
40/** 41/**
41 * Get AGP information. 42 * Get AGP information.
42 * 43 *
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5b2cbb778162..bfce0992fefb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -194,7 +194,6 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
194 * @type: object type 194 * @type: object type
195 * 195 *
196 * LOCKING: 196 * LOCKING:
197 * Caller must hold DRM mode_config lock.
198 * 197 *
199 * Create a unique identifier based on @ptr in @dev's identifier space. Used 198 * Create a unique identifier based on @ptr in @dev's identifier space. Used
200 * for tracking modes, CRTCs and connectors. 199 * for tracking modes, CRTCs and connectors.
@@ -209,15 +208,15 @@ static int drm_mode_object_get(struct drm_device *dev,
209 int new_id = 0; 208 int new_id = 0;
210 int ret; 209 int ret;
211 210
212 WARN(!mutex_is_locked(&dev->mode_config.mutex),
213 "%s called w/o mode_config lock\n", __func__);
214again: 211again:
215 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { 212 if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
216 DRM_ERROR("Ran out memory getting a mode number\n"); 213 DRM_ERROR("Ran out memory getting a mode number\n");
217 return -EINVAL; 214 return -EINVAL;
218 } 215 }
219 216
217 mutex_lock(&dev->mode_config.idr_mutex);
220 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 218 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
219 mutex_unlock(&dev->mode_config.idr_mutex);
221 if (ret == -EAGAIN) 220 if (ret == -EAGAIN)
222 goto again; 221 goto again;
223 222
@@ -239,16 +238,20 @@ again:
239static void drm_mode_object_put(struct drm_device *dev, 238static void drm_mode_object_put(struct drm_device *dev,
240 struct drm_mode_object *object) 239 struct drm_mode_object *object)
241{ 240{
241 mutex_lock(&dev->mode_config.idr_mutex);
242 idr_remove(&dev->mode_config.crtc_idr, object->id); 242 idr_remove(&dev->mode_config.crtc_idr, object->id);
243 mutex_unlock(&dev->mode_config.idr_mutex);
243} 244}
244 245
245void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) 246void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
246{ 247{
247 struct drm_mode_object *obj; 248 struct drm_mode_object *obj = NULL;
248 249
250 mutex_lock(&dev->mode_config.idr_mutex);
249 obj = idr_find(&dev->mode_config.crtc_idr, id); 251 obj = idr_find(&dev->mode_config.crtc_idr, id);
250 if (!obj || (obj->type != type) || (obj->id != id)) 252 if (!obj || (obj->type != type) || (obj->id != id))
251 return NULL; 253 obj = NULL;
254 mutex_unlock(&dev->mode_config.idr_mutex);
252 255
253 return obj; 256 return obj;
254} 257}
@@ -786,6 +789,7 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
786void drm_mode_config_init(struct drm_device *dev) 789void drm_mode_config_init(struct drm_device *dev)
787{ 790{
788 mutex_init(&dev->mode_config.mutex); 791 mutex_init(&dev->mode_config.mutex);
792 mutex_init(&dev->mode_config.idr_mutex);
789 INIT_LIST_HEAD(&dev->mode_config.fb_list); 793 INIT_LIST_HEAD(&dev->mode_config.fb_list);
790 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list); 794 INIT_LIST_HEAD(&dev->mode_config.fb_kernel_list);
791 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 795 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d8a982b71296..964c5eb1fada 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -36,7 +36,7 @@
36/* 36/*
37 * Detailed mode info for 800x600@60Hz 37 * Detailed mode info for 800x600@60Hz
38 */ 38 */
39static struct drm_display_mode std_mode[] = { 39static struct drm_display_mode std_modes[] = {
40 { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, 40 { DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840,
41 968, 1056, 0, 600, 601, 605, 628, 0, 41 968, 1056, 0, 600, 601, 605, 628, 0,
42 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 42 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -60,15 +60,18 @@ static struct drm_display_mode std_mode[] = {
60 * changes have occurred. 60 * changes have occurred.
61 * 61 *
62 * FIXME: take into account monitor limits 62 * FIXME: take into account monitor limits
63 *
64 * RETURNS:
65 * Number of modes found on @connector.
63 */ 66 */
64void drm_helper_probe_single_connector_modes(struct drm_connector *connector, 67int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
65 uint32_t maxX, uint32_t maxY) 68 uint32_t maxX, uint32_t maxY)
66{ 69{
67 struct drm_device *dev = connector->dev; 70 struct drm_device *dev = connector->dev;
68 struct drm_display_mode *mode, *t; 71 struct drm_display_mode *mode, *t;
69 struct drm_connector_helper_funcs *connector_funcs = 72 struct drm_connector_helper_funcs *connector_funcs =
70 connector->helper_private; 73 connector->helper_private;
71 int ret; 74 int count = 0;
72 75
73 DRM_DEBUG("%s\n", drm_get_connector_name(connector)); 76 DRM_DEBUG("%s\n", drm_get_connector_name(connector));
74 /* set all modes to the unverified state */ 77 /* set all modes to the unverified state */
@@ -81,14 +84,14 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
81 DRM_DEBUG("%s is disconnected\n", 84 DRM_DEBUG("%s is disconnected\n",
82 drm_get_connector_name(connector)); 85 drm_get_connector_name(connector));
83 /* TODO set EDID to NULL */ 86 /* TODO set EDID to NULL */
84 return; 87 return 0;
85 } 88 }
86 89
87 ret = (*connector_funcs->get_modes)(connector); 90 count = (*connector_funcs->get_modes)(connector);
91 if (!count)
92 return 0;
88 93
89 if (ret) { 94 drm_mode_connector_list_update(connector);
90 drm_mode_connector_list_update(connector);
91 }
92 95
93 if (maxX && maxY) 96 if (maxX && maxY)
94 drm_mode_validate_size(dev, &connector->modes, maxX, 97 drm_mode_validate_size(dev, &connector->modes, maxX,
@@ -102,25 +105,8 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
102 105
103 drm_mode_prune_invalid(dev, &connector->modes, true); 106 drm_mode_prune_invalid(dev, &connector->modes, true);
104 107
105 if (list_empty(&connector->modes)) { 108 if (list_empty(&connector->modes))
106 struct drm_display_mode *stdmode; 109 return 0;
107
108 DRM_DEBUG("No valid modes on %s\n",
109 drm_get_connector_name(connector));
110
111 /* Should we do this here ???
112 * When no valid EDID modes are available we end up
113 * here and bailed in the past, now we add a standard
114 * 640x480@60Hz mode and carry on.
115 */
116 stdmode = drm_mode_duplicate(dev, &std_mode[0]);
117 drm_mode_probed_add(connector, stdmode);
118 drm_mode_list_concat(&connector->probed_modes,
119 &connector->modes);
120
121 DRM_DEBUG("Adding standard 640x480 @ 60Hz to %s\n",
122 drm_get_connector_name(connector));
123 }
124 110
125 drm_mode_sort(&connector->modes); 111 drm_mode_sort(&connector->modes);
126 112
@@ -131,20 +117,58 @@ void drm_helper_probe_single_connector_modes(struct drm_connector *connector,
131 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 117 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
132 drm_mode_debug_printmodeline(mode); 118 drm_mode_debug_printmodeline(mode);
133 } 119 }
120
121 return count;
134} 122}
135EXPORT_SYMBOL(drm_helper_probe_single_connector_modes); 123EXPORT_SYMBOL(drm_helper_probe_single_connector_modes);
136 124
137void drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX, 125int drm_helper_probe_connector_modes(struct drm_device *dev, uint32_t maxX,
138 uint32_t maxY) 126 uint32_t maxY)
139{ 127{
140 struct drm_connector *connector; 128 struct drm_connector *connector;
129 int count = 0;
141 130
142 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 131 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
143 drm_helper_probe_single_connector_modes(connector, maxX, maxY); 132 count += drm_helper_probe_single_connector_modes(connector,
133 maxX, maxY);
144 } 134 }
135
136 return count;
145} 137}
146EXPORT_SYMBOL(drm_helper_probe_connector_modes); 138EXPORT_SYMBOL(drm_helper_probe_connector_modes);
147 139
140static void drm_helper_add_std_modes(struct drm_device *dev,
141 struct drm_connector *connector)
142{
143 struct drm_display_mode *mode, *t;
144 int i;
145
146 for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
147 struct drm_display_mode *stdmode;
148
149 /*
150 * When no valid EDID modes are available we end up
151 * here and bailed in the past, now we add some standard
152 * modes and move on.
153 */
154 stdmode = drm_mode_duplicate(dev, &std_modes[i]);
155 drm_mode_probed_add(connector, stdmode);
156 drm_mode_list_concat(&connector->probed_modes,
157 &connector->modes);
158
159 DRM_DEBUG("Adding mode %s to %s\n", stdmode->name,
160 drm_get_connector_name(connector));
161 }
162 drm_mode_sort(&connector->modes);
163
164 DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector));
165 list_for_each_entry_safe(mode, t, &connector->modes, head) {
166 mode->vrefresh = drm_mode_vrefresh(mode);
167
168 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
169 drm_mode_debug_printmodeline(mode);
170 }
171}
148 172
149/** 173/**
150 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config 174 * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
@@ -237,6 +261,8 @@ static void drm_enable_connectors(struct drm_device *dev, bool *enabled)
237 261
238 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 262 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
239 enabled[i] = drm_connector_enabled(connector, true); 263 enabled[i] = drm_connector_enabled(connector, true);
264 DRM_DEBUG("connector %d enabled? %s\n", connector->base.id,
265 enabled[i] ? "yes" : "no");
240 any_enabled |= enabled[i]; 266 any_enabled |= enabled[i];
241 i++; 267 i++;
242 } 268 }
@@ -265,11 +291,17 @@ static bool drm_target_preferred(struct drm_device *dev,
265 continue; 291 continue;
266 } 292 }
267 293
294 DRM_DEBUG("looking for preferred mode on connector %d\n",
295 connector->base.id);
296
268 modes[i] = drm_has_preferred_mode(connector, width, height); 297 modes[i] = drm_has_preferred_mode(connector, width, height);
269 if (!modes[i]) { 298 /* No preferred modes, pick one off the list */
299 if (!modes[i] && !list_empty(&connector->modes)) {
270 list_for_each_entry(modes[i], &connector->modes, head) 300 list_for_each_entry(modes[i], &connector->modes, head)
271 break; 301 break;
272 } 302 }
303 DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name :
304 "none");
273 i++; 305 i++;
274 } 306 }
275 return true; 307 return true;
@@ -369,6 +401,8 @@ static void drm_setup_crtcs(struct drm_device *dev)
369 int width, height; 401 int width, height;
370 int i, ret; 402 int i, ret;
371 403
404 DRM_DEBUG("\n");
405
372 width = dev->mode_config.max_width; 406 width = dev->mode_config.max_width;
373 height = dev->mode_config.max_height; 407 height = dev->mode_config.max_height;
374 408
@@ -390,6 +424,8 @@ static void drm_setup_crtcs(struct drm_device *dev)
390 if (!ret) 424 if (!ret)
391 DRM_ERROR("Unable to find initial modes\n"); 425 DRM_ERROR("Unable to find initial modes\n");
392 426
427 DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height);
428
393 drm_pick_crtcs(dev, crtcs, modes, 0, width, height); 429 drm_pick_crtcs(dev, crtcs, modes, 0, width, height);
394 430
395 i = 0; 431 i = 0;
@@ -403,6 +439,8 @@ static void drm_setup_crtcs(struct drm_device *dev)
403 } 439 }
404 440
405 if (mode && crtc) { 441 if (mode && crtc) {
442 DRM_DEBUG("desired mode %s set on crtc %d\n",
443 mode->name, crtc->base.id);
406 crtc->desired_mode = mode; 444 crtc->desired_mode = mode;
407 connector->encoder->crtc = crtc; 445 connector->encoder->crtc = crtc;
408 } else 446 } else
@@ -442,6 +480,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
442 int saved_x, saved_y; 480 int saved_x, saved_y;
443 struct drm_encoder *encoder; 481 struct drm_encoder *encoder;
444 bool ret = true; 482 bool ret = true;
483 bool depth_changed, bpp_changed;
445 484
446 adjusted_mode = drm_mode_duplicate(dev, mode); 485 adjusted_mode = drm_mode_duplicate(dev, mode);
447 486
@@ -450,6 +489,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
450 if (!crtc->enabled) 489 if (!crtc->enabled)
451 return true; 490 return true;
452 491
492 if (old_fb && crtc->fb) {
493 depth_changed = (old_fb->depth != crtc->fb->depth);
494 bpp_changed = (old_fb->bits_per_pixel !=
495 crtc->fb->bits_per_pixel);
496 } else {
497 depth_changed = true;
498 bpp_changed = true;
499 }
500
453 saved_mode = crtc->mode; 501 saved_mode = crtc->mode;
454 saved_x = crtc->x; 502 saved_x = crtc->x;
455 saved_y = crtc->y; 503 saved_y = crtc->y;
@@ -462,7 +510,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
462 crtc->y = y; 510 crtc->y = y;
463 511
464 if (drm_mode_equal(&saved_mode, &crtc->mode)) { 512 if (drm_mode_equal(&saved_mode, &crtc->mode)) {
465 if (saved_x != crtc->x || saved_y != crtc->y) { 513 if (saved_x != crtc->x || saved_y != crtc->y ||
514 depth_changed || bpp_changed) {
466 crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, 515 crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y,
467 old_fb); 516 old_fb);
468 goto done; 517 goto done;
@@ -568,8 +617,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
568 struct drm_encoder **save_encoders, *new_encoder; 617 struct drm_encoder **save_encoders, *new_encoder;
569 struct drm_framebuffer *old_fb; 618 struct drm_framebuffer *old_fb;
570 bool save_enabled; 619 bool save_enabled;
571 bool changed = false; 620 bool mode_changed = false;
572 bool flip_or_move = false; 621 bool fb_changed = false;
573 struct drm_connector *connector; 622 struct drm_connector *connector;
574 int count = 0, ro, fail = 0; 623 int count = 0, ro, fail = 0;
575 struct drm_crtc_helper_funcs *crtc_funcs; 624 struct drm_crtc_helper_funcs *crtc_funcs;
@@ -597,7 +646,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
597 /* save previous config */ 646 /* save previous config */
598 save_enabled = set->crtc->enabled; 647 save_enabled = set->crtc->enabled;
599 648
600 /* this is meant to be num_connector not num_crtc */ 649 /*
650 * We do mode_config.num_connectors here since we'll look at the
651 * CRTC and encoder associated with each connector later.
652 */
601 save_crtcs = kzalloc(dev->mode_config.num_connector * 653 save_crtcs = kzalloc(dev->mode_config.num_connector *
602 sizeof(struct drm_crtc *), GFP_KERNEL); 654 sizeof(struct drm_crtc *), GFP_KERNEL);
603 if (!save_crtcs) 655 if (!save_crtcs)
@@ -613,21 +665,25 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
613 /* We should be able to check here if the fb has the same properties 665 /* We should be able to check here if the fb has the same properties
614 * and then just flip_or_move it */ 666 * and then just flip_or_move it */
615 if (set->crtc->fb != set->fb) { 667 if (set->crtc->fb != set->fb) {
616 /* if we have no fb then its a change not a flip */ 668 /* If we have no fb then treat it as a full mode set */
617 if (set->crtc->fb == NULL) 669 if (set->crtc->fb == NULL)
618 changed = true; 670 mode_changed = true;
671 else if ((set->fb->bits_per_pixel !=
672 set->crtc->fb->bits_per_pixel) ||
673 set->fb->depth != set->crtc->fb->depth)
674 fb_changed = true;
619 else 675 else
620 flip_or_move = true; 676 fb_changed = true;
621 } 677 }
622 678
623 if (set->x != set->crtc->x || set->y != set->crtc->y) 679 if (set->x != set->crtc->x || set->y != set->crtc->y)
624 flip_or_move = true; 680 fb_changed = true;
625 681
626 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 682 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
627 DRM_DEBUG("modes are different\n"); 683 DRM_DEBUG("modes are different\n");
628 drm_mode_debug_printmodeline(&set->crtc->mode); 684 drm_mode_debug_printmodeline(&set->crtc->mode);
629 drm_mode_debug_printmodeline(set->mode); 685 drm_mode_debug_printmodeline(set->mode);
630 changed = true; 686 mode_changed = true;
631 } 687 }
632 688
633 /* a) traverse passed in connector list and get encoders for them */ 689 /* a) traverse passed in connector list and get encoders for them */
@@ -650,7 +706,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
650 } 706 }
651 707
652 if (new_encoder != connector->encoder) { 708 if (new_encoder != connector->encoder) {
653 changed = true; 709 mode_changed = true;
654 connector->encoder = new_encoder; 710 connector->encoder = new_encoder;
655 } 711 }
656 } 712 }
@@ -677,16 +733,16 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
677 new_crtc = set->crtc; 733 new_crtc = set->crtc;
678 } 734 }
679 if (new_crtc != connector->encoder->crtc) { 735 if (new_crtc != connector->encoder->crtc) {
680 changed = true; 736 mode_changed = true;
681 connector->encoder->crtc = new_crtc; 737 connector->encoder->crtc = new_crtc;
682 } 738 }
683 } 739 }
684 740
685 /* mode_set_base is not a required function */ 741 /* mode_set_base is not a required function */
686 if (flip_or_move && !crtc_funcs->mode_set_base) 742 if (fb_changed && !crtc_funcs->mode_set_base)
687 changed = true; 743 mode_changed = true;
688 744
689 if (changed) { 745 if (mode_changed) {
690 old_fb = set->crtc->fb; 746 old_fb = set->crtc->fb;
691 set->crtc->fb = set->fb; 747 set->crtc->fb = set->fb;
692 set->crtc->enabled = (set->mode != NULL); 748 set->crtc->enabled = (set->mode != NULL);
@@ -705,7 +761,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
705 set->crtc->desired_mode = set->mode; 761 set->crtc->desired_mode = set->mode;
706 } 762 }
707 drm_helper_disable_unused_functions(dev); 763 drm_helper_disable_unused_functions(dev);
708 } else if (flip_or_move) { 764 } else if (fb_changed) {
709 old_fb = set->crtc->fb; 765 old_fb = set->crtc->fb;
710 if (set->crtc->fb != set->fb) 766 if (set->crtc->fb != set->fb)
711 set->crtc->fb = set->fb; 767 set->crtc->fb = set->fb;
@@ -764,10 +820,31 @@ bool drm_helper_plugged_event(struct drm_device *dev)
764 */ 820 */
765bool drm_helper_initial_config(struct drm_device *dev, bool can_grow) 821bool drm_helper_initial_config(struct drm_device *dev, bool can_grow)
766{ 822{
767 int ret = false; 823 struct drm_connector *connector;
824 int count = 0;
768 825
769 drm_helper_plugged_event(dev); 826 count = drm_helper_probe_connector_modes(dev,
770 return ret; 827 dev->mode_config.max_width,
828 dev->mode_config.max_height);
829
830 /*
831 * None of the available connectors had any modes, so add some
832 * and try to light them up anyway
833 */
834 if (!count) {
835 DRM_ERROR("connectors have no modes, using standard modes\n");
836 list_for_each_entry(connector,
837 &dev->mode_config.connector_list,
838 head)
839 drm_helper_add_std_modes(dev, connector);
840 }
841
842 drm_setup_crtcs(dev);
843
844 /* alert the driver fb layer */
845 dev->mode_config.funcs->fb_changed(dev);
846
847 return 0;
771} 848}
772EXPORT_SYMBOL(drm_helper_initial_config); 849EXPORT_SYMBOL(drm_helper_initial_config);
773 850
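
A note on the drm_crtc_helper.c changes above: mode probing now returns a count, and only when every connector comes back empty does drm_helper_initial_config() add the standard fallback modes before setting up CRTCs. The following is a minimal user-space sketch of that fallback pattern, assuming made-up connector data; the struct and helper names are illustrative and are not the DRM API.

#include <stdio.h>

struct fake_connector { const char *name; int probed_modes; };

/* stands in for connector_funcs->get_modes(): how many modes probing found */
static int probe_modes(const struct fake_connector *c)
{
        return c->probed_modes;
}

int main(void)
{
        struct fake_connector conns[] = { { "LVDS-1", 0 }, { "VGA-1", 0 } };
        int i, count = 0;

        for (i = 0; i < 2; i++)
                count += probe_modes(&conns[i]);

        /* same spirit as drm_helper_add_std_modes(): nothing found, light up anyway */
        if (!count)
                for (i = 0; i < 2; i++)
                        printf("%s: adding standard 640x480@60 fallback\n", conns[i].name);

        return 0;
}
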
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 5ff88d952226..14c7a23dc157 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -294,6 +294,7 @@ EXPORT_SYMBOL(drm_init);
294 */ 294 */
295static void drm_cleanup(struct drm_device * dev) 295static void drm_cleanup(struct drm_device * dev)
296{ 296{
297 struct drm_map_list *r_list, *list_temp;
297 DRM_DEBUG("\n"); 298 DRM_DEBUG("\n");
298 299
299 if (!dev) { 300 if (!dev) {
@@ -325,6 +326,9 @@ static void drm_cleanup(struct drm_device * dev)
325 drm_ht_remove(&dev->map_hash); 326 drm_ht_remove(&dev->map_hash);
326 drm_ctxbitmap_cleanup(dev); 327 drm_ctxbitmap_cleanup(dev);
327 328
329 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
330 drm_rmmap(dev, r_list->map);
331
328 if (drm_core_check_feature(dev, DRIVER_MODESET)) 332 if (drm_core_check_feature(dev, DRIVER_MODESET))
329 drm_put_minor(&dev->control); 333 drm_put_minor(&dev->control);
330 334
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0fbb0da342cb..5a4d3244758a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -660,7 +660,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
660 660
661 edid = (struct edid *)drm_ddc_read(adapter); 661 edid = (struct edid *)drm_ddc_read(adapter);
662 if (!edid) { 662 if (!edid) {
663 dev_warn(&connector->dev->pdev->dev, "%s: no EDID data\n", 663 dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
664 drm_get_connector_name(connector)); 664 drm_get_connector_name(connector));
665 return NULL; 665 return NULL;
666 } 666 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 724e505873cf..477caa1b1e4b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -267,7 +267,8 @@ EXPORT_SYMBOL(drm_irq_install);
267 */ 267 */
268int drm_irq_uninstall(struct drm_device * dev) 268int drm_irq_uninstall(struct drm_device * dev)
269{ 269{
270 int irq_enabled; 270 unsigned long irqflags;
271 int irq_enabled, i;
271 272
272 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 273 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
273 return -EINVAL; 274 return -EINVAL;
@@ -277,6 +278,16 @@ int drm_irq_uninstall(struct drm_device * dev)
277 dev->irq_enabled = 0; 278 dev->irq_enabled = 0;
278 mutex_unlock(&dev->struct_mutex); 279 mutex_unlock(&dev->struct_mutex);
279 280
281 /*
282 * Wake up any waiters so they don't hang.
283 */
284 spin_lock_irqsave(&dev->vbl_lock, irqflags);
285 for (i = 0; i < dev->num_crtcs; i++) {
286 DRM_WAKEUP(&dev->vbl_queue[i]);
287 dev->vblank_enabled[i] = 0;
288 }
289 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
290
280 if (!irq_enabled) 291 if (!irq_enabled)
281 return -EINVAL; 292 return -EINVAL;
282 293
@@ -652,8 +663,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
652 vblwait->request.sequence, crtc); 663 vblwait->request.sequence, crtc);
653 dev->last_vblank_wait[crtc] = vblwait->request.sequence; 664 dev->last_vblank_wait[crtc] = vblwait->request.sequence;
654 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 665 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
655 ((drm_vblank_count(dev, crtc) 666 (((drm_vblank_count(dev, crtc) -
656 - vblwait->request.sequence) <= (1 << 23))); 667 vblwait->request.sequence) <= (1 << 23)) ||
668 !dev->irq_enabled));
657 669
658 if (ret != -EINTR) { 670 if (ret != -EINTR) {
659 struct timeval now; 671 struct timeval now;
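
The new DRM_WAIT_ON condition in drm_wait_vblank() above also bails out when the IRQ is torn down, and it keeps the existing wrap-safe sequence test: with unsigned arithmetic, (count - requested) <= (1 << 23) still holds after the 32-bit counter wraps, as long as the requested sequence is at most 2^23 counts in the past. A standalone check of that comparison, with illustrative values:

#include <stdio.h>

static int vblank_passed(unsigned int count, unsigned int requested)
{
        /* wrap-safe: relies on unsigned subtraction modulo 2^32 */
        return (count - requested) <= (1u << 23);
}

int main(void)
{
        /* counter wrapped: 0x00000005 is "after" 0xfffffffa */
        printf("%d\n", vblank_passed(0x00000005u, 0xfffffffau));      /* prints 1 */
        /* requested sequence is still far in the future */
        printf("%d\n", vblank_passed(0x00000005u, 0x00ffffffu));      /* prints 0 */
        return 0;
}
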
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 5ca132afa4f2..46bb923b097c 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -118,12 +118,20 @@ static void drm_master_destroy(struct kref *kref)
118 struct drm_master *master = container_of(kref, struct drm_master, refcount); 118 struct drm_master *master = container_of(kref, struct drm_master, refcount);
119 struct drm_magic_entry *pt, *next; 119 struct drm_magic_entry *pt, *next;
120 struct drm_device *dev = master->minor->dev; 120 struct drm_device *dev = master->minor->dev;
121 struct drm_map_list *r_list, *list_temp;
121 122
122 list_del(&master->head); 123 list_del(&master->head);
123 124
124 if (dev->driver->master_destroy) 125 if (dev->driver->master_destroy)
125 dev->driver->master_destroy(dev, master); 126 dev->driver->master_destroy(dev, master);
126 127
128 list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
129 if (r_list->master == master) {
130 drm_rmmap_locked(dev, r_list->map);
131 r_list = NULL;
132 }
133 }
134
127 if (master->unique) { 135 if (master->unique) {
128 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER); 136 drm_free(master->unique, master->unique_size, DRM_MEM_DRIVER);
129 master->unique = NULL; 137 master->unique = NULL;
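
Both the drm_drv.c and drm_stub.c hunks above walk dev->maplist with list_for_each_entry_safe() because entries are removed (and freed by drm_rmmap) during the walk. A user-space sketch of why the safe variant matters, using a plain singly linked list in place of the kernel list API; the types are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct map { int owner; struct map *next; };

int main(void)
{
        struct map *head = NULL, *m, *next, **prev;
        int i;

        for (i = 0; i < 5; i++) {
                m = malloc(sizeof(*m));
                m->owner = i % 2;               /* maps owned by masters 0 and 1 */
                m->next = head;
                head = m;
        }

        /* remove every map owned by master 1, caching ->next before freeing */
        for (prev = &head, m = head; m; m = next) {
                next = m->next;
                if (m->owner == 1) {
                        *prev = next;
                        free(m);
                } else {
                        prev = &m->next;
                }
        }

        for (m = head; m; m = m->next)
                printf("remaining map owned by master %d\n", m->owner);
        return 0;
}
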
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 62a4bf7b49df..ee64b7301f67 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -177,6 +177,14 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
177 drm_i915_private_t *dev_priv = dev->dev_private; 177 drm_i915_private_t *dev_priv = dev->dev_private;
178 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 178 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
179 179
180 master_priv->sarea = drm_getsarea(dev);
181 if (master_priv->sarea) {
182 master_priv->sarea_priv = (drm_i915_sarea_t *)
183 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
184 } else {
185 DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
186 }
187
180 if (init->ring_size != 0) { 188 if (init->ring_size != 0) {
181 if (dev_priv->ring.ring_obj != NULL) { 189 if (dev_priv->ring.ring_obj != NULL) {
182 i915_dma_cleanup(dev); 190 i915_dma_cleanup(dev);
@@ -936,13 +944,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
936 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & 944 dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
937 0xff000000; 945 0xff000000;
938 946
939 DRM_DEBUG("*** fb base 0x%08lx\n", dev->mode_config.fb_base); 947 if (IS_MOBILE(dev) || IS_I9XX(dev))
940
941 if (IS_MOBILE(dev) || (IS_I9XX(dev) && !IS_I965G(dev) && !IS_G33(dev)))
942 dev_priv->cursor_needs_physical = true; 948 dev_priv->cursor_needs_physical = true;
943 else 949 else
944 dev_priv->cursor_needs_physical = false; 950 dev_priv->cursor_needs_physical = false;
945 951
952 if (IS_I965G(dev) || IS_G33(dev))
953 dev_priv->cursor_needs_physical = false;
954
946 ret = i915_probe_agp(dev, &agp_size, &prealloc_size); 955 ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
947 if (ret) 956 if (ret)
948 goto kfree_devname; 957 goto kfree_devname;
@@ -1152,6 +1161,8 @@ int i915_driver_unload(struct drm_device *dev)
1152 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1161 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1153 intel_modeset_cleanup(dev); 1162 intel_modeset_cleanup(dev);
1154 1163
1164 i915_gem_free_all_phys_object(dev);
1165
1155 mutex_lock(&dev->struct_mutex); 1166 mutex_lock(&dev->struct_mutex);
1156 i915_gem_cleanup_ringbuffer(dev); 1167 i915_gem_cleanup_ringbuffer(dev);
1157 mutex_unlock(&dev->struct_mutex); 1168 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 563de18063fd..e13518252007 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,6 +72,18 @@ enum pipe {
72#define WATCH_INACTIVE 0 72#define WATCH_INACTIVE 0
73#define WATCH_PWRITE 0 73#define WATCH_PWRITE 0
74 74
75#define I915_GEM_PHYS_CURSOR_0 1
76#define I915_GEM_PHYS_CURSOR_1 2
77#define I915_GEM_PHYS_OVERLAY_REGS 3
78#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
79
80struct drm_i915_gem_phys_object {
81 int id;
82 struct page **page_list;
83 drm_dma_handle_t *handle;
84 struct drm_gem_object *cur_obj;
85};
86
75typedef struct _drm_i915_ring_buffer { 87typedef struct _drm_i915_ring_buffer {
76 int tail_mask; 88 int tail_mask;
77 unsigned long Size; 89 unsigned long Size;
@@ -358,6 +370,9 @@ typedef struct drm_i915_private {
358 uint32_t bit_6_swizzle_x; 370 uint32_t bit_6_swizzle_x;
359 /** Bit 6 swizzling required for Y tiling */ 371 /** Bit 6 swizzling required for Y tiling */
360 uint32_t bit_6_swizzle_y; 372 uint32_t bit_6_swizzle_y;
373
374 /* storage for physical objects */
375 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
361 } mm; 376 } mm;
362} drm_i915_private_t; 377} drm_i915_private_t;
363 378
@@ -436,6 +451,9 @@ struct drm_i915_gem_object {
436 /** User space pin count and filp owning the pin */ 451 /** User space pin count and filp owning the pin */
437 uint32_t user_pin_count; 452 uint32_t user_pin_count;
438 struct drm_file *pin_filp; 453 struct drm_file *pin_filp;
454
455 /** for phy allocated objects */
456 struct drm_i915_gem_phys_object *phys_obj;
439}; 457};
440 458
441/** 459/**
@@ -598,6 +616,11 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
598int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 616int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
599int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 617int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
600 int write); 618 int write);
619int i915_gem_attach_phys_object(struct drm_device *dev,
620 struct drm_gem_object *obj, int id);
621void i915_gem_detach_phys_object(struct drm_device *dev,
622 struct drm_gem_object *obj);
623void i915_gem_free_all_phys_object(struct drm_device *dev);
601 624
602/* i915_gem_tiling.c */ 625/* i915_gem_tiling.c */
603void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 626void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1384d6686555..debad5c04cc0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,9 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
55static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 55static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
56static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 56static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
57static int i915_gem_evict_something(struct drm_device *dev); 57static int i915_gem_evict_something(struct drm_device *dev);
58static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
59 struct drm_i915_gem_pwrite *args,
60 struct drm_file *file_priv);
58 61
59int i915_gem_do_init(struct drm_device *dev, unsigned long start, 62int i915_gem_do_init(struct drm_device *dev, unsigned long start,
60 unsigned long end) 63 unsigned long end)
@@ -386,8 +389,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
386 * pread/pwrite currently are reading and writing from the CPU 389 * pread/pwrite currently are reading and writing from the CPU
387 * perspective, requiring manual detiling by the client. 390 * perspective, requiring manual detiling by the client.
388 */ 391 */
389 if (obj_priv->tiling_mode == I915_TILING_NONE && 392 if (obj_priv->phys_obj)
390 dev->gtt_total != 0) 393 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
394 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
395 dev->gtt_total != 0)
391 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 396 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
392 else 397 else
393 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 398 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
@@ -2858,6 +2863,9 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2858 while (obj_priv->pin_count > 0) 2863 while (obj_priv->pin_count > 0)
2859 i915_gem_object_unpin(obj); 2864 i915_gem_object_unpin(obj);
2860 2865
2866 if (obj_priv->phys_obj)
2867 i915_gem_detach_phys_object(dev, obj);
2868
2861 i915_gem_object_unbind(obj); 2869 i915_gem_object_unbind(obj);
2862 2870
2863 list = &obj->map_list; 2871 list = &obj->map_list;
@@ -3293,3 +3301,180 @@ i915_gem_load(struct drm_device *dev)
3293 3301
3294 i915_gem_detect_bit_6_swizzle(dev); 3302 i915_gem_detect_bit_6_swizzle(dev);
3295} 3303}
3304
3305/*
3306 * Create a physically contiguous memory object for this object
3307 * e.g. for cursor + overlay regs
3308 */
3309int i915_gem_init_phys_object(struct drm_device *dev,
3310 int id, int size)
3311{
3312 drm_i915_private_t *dev_priv = dev->dev_private;
3313 struct drm_i915_gem_phys_object *phys_obj;
3314 int ret;
3315
3316 if (dev_priv->mm.phys_objs[id - 1] || !size)
3317 return 0;
3318
3319 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3320 if (!phys_obj)
3321 return -ENOMEM;
3322
3323 phys_obj->id = id;
3324
3325 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
3326 if (!phys_obj->handle) {
3327 ret = -ENOMEM;
3328 goto kfree_obj;
3329 }
3330#ifdef CONFIG_X86
3331 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3332#endif
3333
3334 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3335
3336 return 0;
3337kfree_obj:
3338 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3339 return ret;
3340}
3341
3342void i915_gem_free_phys_object(struct drm_device *dev, int id)
3343{
3344 drm_i915_private_t *dev_priv = dev->dev_private;
3345 struct drm_i915_gem_phys_object *phys_obj;
3346
3347 if (!dev_priv->mm.phys_objs[id - 1])
3348 return;
3349
3350 phys_obj = dev_priv->mm.phys_objs[id - 1];
3351 if (phys_obj->cur_obj) {
3352 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3353 }
3354
3355#ifdef CONFIG_X86
3356 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3357#endif
3358 drm_pci_free(dev, phys_obj->handle);
3359 kfree(phys_obj);
3360 dev_priv->mm.phys_objs[id - 1] = NULL;
3361}
3362
3363void i915_gem_free_all_phys_object(struct drm_device *dev)
3364{
3365 int i;
3366
3367 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3368 i915_gem_free_phys_object(dev, i);
3369}
3370
3371void i915_gem_detach_phys_object(struct drm_device *dev,
3372 struct drm_gem_object *obj)
3373{
3374 struct drm_i915_gem_object *obj_priv;
3375 int i;
3376 int ret;
3377 int page_count;
3378
3379 obj_priv = obj->driver_private;
3380 if (!obj_priv->phys_obj)
3381 return;
3382
3383 ret = i915_gem_object_get_page_list(obj);
3384 if (ret)
3385 goto out;
3386
3387 page_count = obj->size / PAGE_SIZE;
3388
3389 for (i = 0; i < page_count; i++) {
3390 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3391 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3392
3393 memcpy(dst, src, PAGE_SIZE);
3394 kunmap_atomic(dst, KM_USER0);
3395 }
3396 drm_clflush_pages(obj_priv->page_list, page_count);
3397 drm_agp_chipset_flush(dev);
3398out:
3399 obj_priv->phys_obj->cur_obj = NULL;
3400 obj_priv->phys_obj = NULL;
3401}
3402
3403int
3404i915_gem_attach_phys_object(struct drm_device *dev,
3405 struct drm_gem_object *obj, int id)
3406{
3407 drm_i915_private_t *dev_priv = dev->dev_private;
3408 struct drm_i915_gem_object *obj_priv;
3409 int ret = 0;
3410 int page_count;
3411 int i;
3412
3413 if (id > I915_MAX_PHYS_OBJECT)
3414 return -EINVAL;
3415
3416 obj_priv = obj->driver_private;
3417
3418 if (obj_priv->phys_obj) {
3419 if (obj_priv->phys_obj->id == id)
3420 return 0;
3421 i915_gem_detach_phys_object(dev, obj);
3422 }
3423
3424
3425 /* create a new object */
3426 if (!dev_priv->mm.phys_objs[id - 1]) {
3427 ret = i915_gem_init_phys_object(dev, id,
3428 obj->size);
3429 if (ret) {
3430 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3431 goto out;
3432 }
3433 }
3434
3435 /* bind to the object */
3436 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3437 obj_priv->phys_obj->cur_obj = obj;
3438
3439 ret = i915_gem_object_get_page_list(obj);
3440 if (ret) {
3441 DRM_ERROR("failed to get page list\n");
3442 goto out;
3443 }
3444
3445 page_count = obj->size / PAGE_SIZE;
3446
3447 for (i = 0; i < page_count; i++) {
3448 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3449 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3450
3451 memcpy(dst, src, PAGE_SIZE);
3452 kunmap_atomic(src, KM_USER0);
3453 }
3454
3455 return 0;
3456out:
3457 return ret;
3458}
3459
3460static int
3461i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3462 struct drm_i915_gem_pwrite *args,
3463 struct drm_file *file_priv)
3464{
3465 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3466 void *obj_addr;
3467 int ret;
3468 char __user *user_data;
3469
3470 user_data = (char __user *) (uintptr_t) args->data_ptr;
3471 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3472
3473 DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
3474 ret = copy_from_user(obj_addr, user_data, args->size);
3475 if (ret)
3476 return -EFAULT;
3477
3478 drm_agp_chipset_flush(dev);
3479 return 0;
3480}
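
The new phys-object code above allocates a physically contiguous buffer with drm_pci_alloc() and copies the object's pages into (or back out of) it one PAGE_SIZE chunk at a time, so cursors and overlay registers can be fed from memory the hardware can address directly. A user-space analogue of that copy loop, with malloc standing in for the contiguous DMA allocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096

int main(void)
{
        int page_count = 4, i;
        char *pages[4];
        char *contig = malloc(page_count * FAKE_PAGE_SIZE);

        for (i = 0; i < page_count; i++) {
                pages[i] = malloc(FAKE_PAGE_SIZE);
                memset(pages[i], 'A' + i, FAKE_PAGE_SIZE);
        }

        /* same structure as the attach loop: one memcpy per page */
        for (i = 0; i < page_count; i++)
                memcpy(contig + i * FAKE_PAGE_SIZE, pages[i], FAKE_PAGE_SIZE);

        printf("first byte of page 2 in the contiguous copy: %c\n",
               contig[2 * FAKE_PAGE_SIZE]);

        for (i = 0; i < page_count; i++)
                free(pages[i]);
        free(contig);
        return 0;
}
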
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0cadafbef411..6290219de6c8 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -411,6 +411,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
411{ 411{
412 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 412 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
413 unsigned long irqflags; 413 unsigned long irqflags;
414 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
415 u32 pipeconf;
416
417 pipeconf = I915_READ(pipeconf_reg);
418 if (!(pipeconf & PIPEACONF_ENABLE))
419 return -EINVAL;
414 420
415 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 421 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
416 if (IS_I965G(dev)) 422 if (IS_I965G(dev))
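
The i915_enable_vblank() hunk above refuses to enable vblank interrupts on a pipe whose configuration register does not have its enable bit set. A tiny standalone illustration of that guard; the register layout and bit position here are placeholders, not the real i915 definitions.

#include <stdio.h>
#include <errno.h>

#define FAKE_PIPECONF_ENABLE    (1u << 31)

static int enable_vblank(unsigned int pipeconf)
{
        if (!(pipeconf & FAKE_PIPECONF_ENABLE))
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("disabled pipe: %d\n", enable_vblank(0));
        printf("enabled pipe:  %d\n", enable_vblank(FAKE_PIPECONF_ENABLE));
        return 0;
}
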
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8ccb9c3ab868..31c3732b7a69 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -401,6 +401,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
401 I915_WRITE(dspstride, crtc->fb->pitch); 401 I915_WRITE(dspstride, crtc->fb->pitch);
402 402
403 dspcntr = I915_READ(dspcntr_reg); 403 dspcntr = I915_READ(dspcntr_reg);
404 /* Mask out pixel format bits in case we change it */
405 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
404 switch (crtc->fb->bits_per_pixel) { 406 switch (crtc->fb->bits_per_pixel) {
405 case 8: 407 case 8:
406 dspcntr |= DISPPLANE_8BPP; 408 dspcntr |= DISPPLANE_8BPP;
@@ -1014,21 +1016,25 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1014 1016
1015 if (bo->size < width * height * 4) { 1017 if (bo->size < width * height * 4) {
1016 DRM_ERROR("buffer is to small\n"); 1018 DRM_ERROR("buffer is to small\n");
1017 drm_gem_object_unreference(bo); 1019 ret = -ENOMEM;
1018 return -ENOMEM; 1020 goto fail;
1019 } 1021 }
1020 1022
1021 if (dev_priv->cursor_needs_physical) { 1023 /* we only need to pin inside GTT if cursor is non-phy */
1022 addr = dev->agp->base + obj_priv->gtt_offset; 1024 if (!dev_priv->cursor_needs_physical) {
1023 } else { 1025 ret = i915_gem_object_pin(bo, PAGE_SIZE);
1026 if (ret) {
1027 DRM_ERROR("failed to pin cursor bo\n");
1028 goto fail;
1029 }
1024 addr = obj_priv->gtt_offset; 1030 addr = obj_priv->gtt_offset;
1025 } 1031 } else {
1026 1032 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
1027 ret = i915_gem_object_pin(bo, PAGE_SIZE); 1033 if (ret) {
1028 if (ret) { 1034 DRM_ERROR("failed to attach phys object\n");
1029 DRM_ERROR("failed to pin cursor bo\n"); 1035 goto fail;
1030 drm_gem_object_unreference(bo); 1036 }
1031 return ret; 1037 addr = obj_priv->phys_obj->handle->busaddr;
1032 } 1038 }
1033 1039
1034 temp = 0; 1040 temp = 0;
@@ -1041,14 +1047,25 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1041 I915_WRITE(base, addr); 1047 I915_WRITE(base, addr);
1042 1048
1043 if (intel_crtc->cursor_bo) { 1049 if (intel_crtc->cursor_bo) {
1044 i915_gem_object_unpin(intel_crtc->cursor_bo); 1050 if (dev_priv->cursor_needs_physical) {
1051 if (intel_crtc->cursor_bo != bo)
1052 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
1053 } else
1054 i915_gem_object_unpin(intel_crtc->cursor_bo);
1055 mutex_lock(&dev->struct_mutex);
1045 drm_gem_object_unreference(intel_crtc->cursor_bo); 1056 drm_gem_object_unreference(intel_crtc->cursor_bo);
1057 mutex_unlock(&dev->struct_mutex);
1046 } 1058 }
1047 1059
1048 intel_crtc->cursor_addr = addr; 1060 intel_crtc->cursor_addr = addr;
1049 intel_crtc->cursor_bo = bo; 1061 intel_crtc->cursor_bo = bo;
1050 1062
1051 return 0; 1063 return 0;
1064fail:
1065 mutex_lock(&dev->struct_mutex);
1066 drm_gem_object_unreference(bo);
1067 mutex_unlock(&dev->struct_mutex);
1068 return ret;
1052} 1069}
1053 1070
1054static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 1071static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
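
One detail worth calling out in the intel_display.c hunk: intel_pipe_set_base() now clears the old pixel-format bits in DSPCNTR before OR-ing in the new ones, so switching framebuffer depths cannot leave stale format bits behind. The read-modify-write pattern, with made-up bit values rather than the real register layout:

#include <stdio.h>

#define FAKE_PIXFORMAT_MASK     (0xfu << 26)
#define FAKE_FMT_8BPP           (0x2u << 26)
#define FAKE_FMT_32BPP          (0x6u << 26)

int main(void)
{
        unsigned int dspcntr = FAKE_FMT_32BPP | 0x1;    /* old format plus other bits */

        dspcntr &= ~FAKE_PIXFORMAT_MASK;                /* clear old format */
        dspcntr |= FAKE_FMT_8BPP;                       /* set new format */

        printf("dspcntr = 0x%08x\n", dspcntr);          /* only the 8bpp bits remain */
        return 0;
}
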
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index a5a2f5339e9e..5ee9d4c25753 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -137,10 +137,6 @@ struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
137 chan->reg = reg; 137 chan->reg = reg;
138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); 138 snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
139 chan->adapter.owner = THIS_MODULE; 139 chan->adapter.owner = THIS_MODULE;
140#ifndef I2C_HW_B_INTELFB
141#define I2C_HW_B_INTELFB I2C_HW_B_I810
142#endif
143 chan->adapter.id = I2C_HW_B_INTELFB;
144 chan->adapter.algo_data = &chan->algo; 140 chan->adapter.algo_data = &chan->algo;
145 chan->adapter.dev.parent = &dev->pdev->dev; 141 chan->adapter.dev.parent = &dev->pdev->dev;
146 chan->algo.setsda = set_data; 142 chan->algo.setsda = set_data;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ccecfaf6307b..6b1148fc2cbe 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -340,6 +340,18 @@ static void intel_lvds_destroy(struct drm_connector *connector)
340 kfree(connector); 340 kfree(connector);
341} 341}
342 342
343static int intel_lvds_set_property(struct drm_connector *connector,
344 struct drm_property *property,
345 uint64_t value)
346{
347 struct drm_device *dev = connector->dev;
348
349 if (property == dev->mode_config.dpms_property && connector->encoder)
350 intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
351
352 return 0;
353}
354
343static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 355static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
344 .dpms = intel_lvds_dpms, 356 .dpms = intel_lvds_dpms,
345 .mode_fixup = intel_lvds_mode_fixup, 357 .mode_fixup = intel_lvds_mode_fixup,
@@ -359,6 +371,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
359 .restore = intel_lvds_restore, 371 .restore = intel_lvds_restore,
360 .detect = intel_lvds_detect, 372 .detect = intel_lvds_detect,
361 .fill_modes = drm_helper_probe_single_connector_modes, 373 .fill_modes = drm_helper_probe_single_connector_modes,
374 .set_property = intel_lvds_set_property,
362 .destroy = intel_lvds_destroy, 375 .destroy = intel_lvds_destroy,
363}; 376};
364 377
@@ -456,6 +469,13 @@ void intel_lvds_init(struct drm_device *dev)
456 dev_priv->panel_fixed_mode = 469 dev_priv->panel_fixed_mode =
457 drm_mode_duplicate(dev, dev_priv->vbt_mode); 470 drm_mode_duplicate(dev, dev_priv->vbt_mode);
458 mutex_unlock(&dev->mode_config.mutex); 471 mutex_unlock(&dev->mode_config.mutex);
472 if (dev_priv->panel_fixed_mode) {
473 dev_priv->panel_fixed_mode->type |=
474 DRM_MODE_TYPE_PREFERRED;
475 drm_mode_probed_add(connector,
476 dev_priv->panel_fixed_mode);
477 goto out;
478 }
459 } 479 }
460 480
461 /* 481 /*
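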
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 4b33bc82cc24..b84bf066879b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -189,6 +189,16 @@ config SENSORS_ADT7473
189 This driver can also be built as a module. If so, the module 189 This driver can also be built as a module. If so, the module
190 will be called adt7473. 190 will be called adt7473.
191 191
192config SENSORS_ADT7475
193 tristate "Analog Devices ADT7475"
194 depends on I2C && EXPERIMENTAL
195 help
196 If you say yes here you get support for the Analog Devices
197 ADT7475 hardware monitoring chips.
198
199 This driver can also be built as a module. If so, the module
200 will be called adt7475.
201
192config SENSORS_K8TEMP 202config SENSORS_K8TEMP
193 tristate "AMD Athlon64/FX or Opteron temperature sensor" 203 tristate "AMD Athlon64/FX or Opteron temperature sensor"
194 depends on X86 && PCI && EXPERIMENTAL 204 depends on X86 && PCI && EXPERIMENTAL
@@ -861,6 +871,8 @@ config SENSORS_HDAPS
861config SENSORS_LIS3LV02D 871config SENSORS_LIS3LV02D
862 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer" 872 tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer"
863 depends on ACPI && INPUT 873 depends on ACPI && INPUT
874 select NEW_LEDS
875 select LEDS_CLASS
864 default n 876 default n
865 help 877 help
866 This driver provides support for the LIS3LV02Dx accelerometer. In 878 This driver provides support for the LIS3LV02Dx accelerometer. In
@@ -872,10 +884,16 @@ config SENSORS_LIS3LV02D
872 /sys/devices/platform/lis3lv02d. 884 /sys/devices/platform/lis3lv02d.
873 885
874 This driver also provides an absolute input class device, allowing 886 This driver also provides an absolute input class device, allowing
875 the laptop to act as a pinball machine-esque joystick. 887 the laptop to act as a pinball machine-esque joystick. On HP laptops,
888 if the LED infrastructure is activated, support for an LED indicating
889 disk protection will be provided as hp:red:hddprotection.
876 890
877 This driver can also be built as a module. If so, the module 891 This driver can also be built as modules. If so, the core module
878 will be called lis3lv02d. 892 will be called lis3lv02d and a specific module for HP laptops will be
893 called hp_accel.
894
895 Say Y here if you have an applicable laptop and want to experience
896 the awesome power of lis3lv02d.
879 897
880config SENSORS_APPLESMC 898config SENSORS_APPLESMC
881 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)" 899 tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 19cb1ace3eb4..2e80f37f39eb 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
28obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o 28obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
29obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o 29obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
30obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o 30obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o
31obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
32
31obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o 33obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
32obj-$(CONFIG_SENSORS_AMS) += ams/ 34obj-$(CONFIG_SENSORS_AMS) += ams/
33obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o 35obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 70bb854086df..e52b38806d03 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -279,7 +279,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
279 { "OTES1 Fan", 36, 2, 60, 1, 0 }, 279 { "OTES1 Fan", 36, 2, 60, 1, 0 },
280 { NULL, 0, 0, 0, 0, 0 } } 280 { NULL, 0, 0, 0, 0, 0 } }
281 }, 281 },
282 { 0x0011, "AT8 32X(ATI RD580-ULI M1575)", { 282 { 0x0011, "AT8 32X", {
283 { "CPU Core", 0, 0, 10, 1, 0 }, 283 { "CPU Core", 0, 0, 10, 1, 0 },
284 { "DDR", 1, 0, 20, 1, 0 }, 284 { "DDR", 1, 0, 20, 1, 0 },
285 { "DDR VTT", 2, 0, 10, 1, 0 }, 285 { "DDR VTT", 2, 0, 10, 1, 0 },
@@ -402,7 +402,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
402 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 402 { "AUX3 Fan", 36, 2, 60, 1, 0 },
403 { NULL, 0, 0, 0, 0, 0 } } 403 { NULL, 0, 0, 0, 0, 0 } }
404 }, 404 },
405 { 0x0016, "AW9D-MAX (Intel i975-ICH7)", { 405 { 0x0016, "AW9D-MAX", {
406 { "CPU Core", 0, 0, 10, 1, 0 }, 406 { "CPU Core", 0, 0, 10, 1, 0 },
407 { "DDR2", 1, 0, 20, 1, 0 }, 407 { "DDR2", 1, 0, 20, 1, 0 },
408 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 408 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -482,7 +482,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
482 { "AUX3 Fan", 36, 2, 60, 1, 0 }, 482 { "AUX3 Fan", 36, 2, 60, 1, 0 },
483 { NULL, 0, 0, 0, 0, 0 } } 483 { NULL, 0, 0, 0, 0, 0 } }
484 }, 484 },
485 { 0x0019, NULL /* Unknown, need DMI string */, { 485 { 0x0019, "IN9 32X MAX", {
486 { "CPU Core", 7, 0, 10, 1, 0 }, 486 { "CPU Core", 7, 0, 10, 1, 0 },
487 { "DDR2", 13, 0, 20, 1, 0 }, 487 { "DDR2", 13, 0, 20, 1, 0 },
488 { "DDR2 VTT", 14, 0, 10, 1, 0 }, 488 { "DDR2 VTT", 14, 0, 10, 1, 0 },
@@ -509,7 +509,7 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
509 { "AUX3 FAN", 36, 2, 60, 1, 0 }, 509 { "AUX3 FAN", 36, 2, 60, 1, 0 },
510 { NULL, 0, 0, 0, 0, 0 } } 510 { NULL, 0, 0, 0, 0, 0 } }
511 }, 511 },
512 { 0x001A, "IP35 Pro(Intel P35-ICH9R)", { 512 { 0x001A, "IP35 Pro", {
513 { "CPU Core", 0, 0, 10, 1, 0 }, 513 { "CPU Core", 0, 0, 10, 1, 0 },
514 { "DDR2", 1, 0, 20, 1, 0 }, 514 { "DDR2", 1, 0, 20, 1, 0 },
515 { "DDR2 VTT", 2, 0, 10, 1, 0 }, 515 { "DDR2 VTT", 2, 0, 10, 1, 0 },
@@ -1128,6 +1128,7 @@ static int __init abituguru3_dmi_detect(void)
1128{ 1128{
1129 const char *board_vendor, *board_name; 1129 const char *board_vendor, *board_name;
1130 int i, err = (force) ? 1 : -ENODEV; 1130 int i, err = (force) ? 1 : -ENODEV;
1131 size_t sublen;
1131 1132
1132 board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); 1133 board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
1133 if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/")) 1134 if (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))
@@ -1137,9 +1138,20 @@ static int __init abituguru3_dmi_detect(void)
1137 if (!board_name) 1138 if (!board_name)
1138 return err; 1139 return err;
1139 1140
1141 /* At the moment, we don't care about the part of the vendor
1142 * DMI string contained in brackets. Truncate the string at
1143 * the first occurrence of a bracket. Trim any trailing space
1144 * from the substring.
1145 */
1146 sublen = strcspn(board_name, "(");
1147 while (sublen > 0 && board_name[sublen - 1] == ' ')
1148 sublen--;
1149
1140 for (i = 0; abituguru3_motherboards[i].id; i++) { 1150 for (i = 0; abituguru3_motherboards[i].id; i++) {
1141 const char *dmi_name = abituguru3_motherboards[i].dmi_name; 1151 const char *dmi_name = abituguru3_motherboards[i].dmi_name;
1142 if (dmi_name && !strcmp(dmi_name, board_name)) 1152 if (!dmi_name || strlen(dmi_name) != sublen)
1153 continue;
1154 if (!strncasecmp(board_name, dmi_name, sublen))
1143 break; 1155 break;
1144 } 1156 }
1145 1157
@@ -1153,7 +1165,7 @@ static int __init abituguru3_dmi_detect(void)
1153 1165
1154static inline int abituguru3_dmi_detect(void) 1166static inline int abituguru3_dmi_detect(void)
1155{ 1167{
1156 return -ENODEV; 1168 return 1;
1157} 1169}
1158 1170
1159#endif /* CONFIG_DMI */ 1171#endif /* CONFIG_DMI */
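
The abituguru3 DMI matching above now truncates the board name at the first '(' and trims trailing spaces before doing a length-limited, case-insensitive comparison, so a table entry such as "IP35 Pro" matches "IP35 Pro(Intel P35-ICH9R)". A standalone demo of that matching logic:

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int board_matches(const char *board_name, const char *dmi_name)
{
        size_t sublen = strcspn(board_name, "(");

        while (sublen > 0 && board_name[sublen - 1] == ' ')
                sublen--;

        return dmi_name && strlen(dmi_name) == sublen &&
               !strncasecmp(board_name, dmi_name, sublen);
}

int main(void)
{
        printf("%d\n", board_matches("IP35 Pro(Intel P35-ICH9R)", "IP35 Pro"));   /* 1 */
        printf("%d\n", board_matches("AW9D-MAX (Intel i975-ICH7)", "AW9D-MAX"));  /* 1 */
        printf("%d\n", board_matches("AT8 32X", "IN9 32X MAX"));                  /* 0 */
        return 0;
}
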
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
new file mode 100644
index 000000000000..d39877a7da63
--- /dev/null
+++ b/drivers/hwmon/adt7475.c
@@ -0,0 +1,1221 @@
1/*
2 * adt7475 - Thermal sensor driver for the ADT7475 chip and derivatives
3 * Copyright (C) 2007-2008, Advanced Micro Devices, Inc.
4 * Copyright (C) 2008 Jordan Crouse <jordan@cosmicpenguin.net>
5 * Copyright (C) 2008 Hans de Goede <hdegoede@redhat.com>
6
7 * Derived from the lm83 driver by Jean Delvare
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/i2c.h>
18#include <linux/hwmon.h>
19#include <linux/hwmon-sysfs.h>
20#include <linux/err.h>
21
22/* Indexes for the sysfs hooks */
23
24#define INPUT 0
25#define MIN 1
26#define MAX 2
27#define CONTROL 3
28#define OFFSET 3
29#define AUTOMIN 4
30#define THERM 5
31#define HYSTERSIS 6
32
33/* These are unique identifiers for the sysfs functions - unlike the
34 numbers above, these are not also indexes into an array
35*/
36
37#define ALARM 9
38#define FAULT 10
39
40/* 7475 Common Registers */
41
42#define REG_VOLTAGE_BASE 0x21
43#define REG_TEMP_BASE 0x25
44#define REG_TACH_BASE 0x28
45#define REG_PWM_BASE 0x30
46#define REG_PWM_MAX_BASE 0x38
47
48#define REG_DEVID 0x3D
49#define REG_VENDID 0x3E
50
51#define REG_STATUS1 0x41
52#define REG_STATUS2 0x42
53
54#define REG_VOLTAGE_MIN_BASE 0x46
55#define REG_VOLTAGE_MAX_BASE 0x47
56
57#define REG_TEMP_MIN_BASE 0x4E
58#define REG_TEMP_MAX_BASE 0x4F
59
60#define REG_TACH_MIN_BASE 0x54
61
62#define REG_PWM_CONFIG_BASE 0x5C
63
64#define REG_TEMP_TRANGE_BASE 0x5F
65
66#define REG_PWM_MIN_BASE 0x64
67
68#define REG_TEMP_TMIN_BASE 0x67
69#define REG_TEMP_THERM_BASE 0x6A
70
71#define REG_REMOTE1_HYSTERSIS 0x6D
72#define REG_REMOTE2_HYSTERSIS 0x6E
73
74#define REG_TEMP_OFFSET_BASE 0x70
75
76#define REG_EXTEND1 0x76
77#define REG_EXTEND2 0x77
78#define REG_CONFIG5 0x7C
79
80#define CONFIG5_TWOSCOMP 0x01
81#define CONFIG5_TEMPOFFSET 0x02
82
83/* ADT7475 Settings */
84
85#define ADT7475_VOLTAGE_COUNT 2
86#define ADT7475_TEMP_COUNT 3
87#define ADT7475_TACH_COUNT 4
88#define ADT7475_PWM_COUNT 3
89
90/* Macro to read the registers */
91
92#define adt7475_read(reg) i2c_smbus_read_byte_data(client, (reg))
93
94/* Macros to easily index the registers */
95
96#define TACH_REG(idx) (REG_TACH_BASE + ((idx) * 2))
97#define TACH_MIN_REG(idx) (REG_TACH_MIN_BASE + ((idx) * 2))
98
99#define PWM_REG(idx) (REG_PWM_BASE + (idx))
100#define PWM_MAX_REG(idx) (REG_PWM_MAX_BASE + (idx))
101#define PWM_MIN_REG(idx) (REG_PWM_MIN_BASE + (idx))
102#define PWM_CONFIG_REG(idx) (REG_PWM_CONFIG_BASE + (idx))
103
104#define VOLTAGE_REG(idx) (REG_VOLTAGE_BASE + (idx))
105#define VOLTAGE_MIN_REG(idx) (REG_VOLTAGE_MIN_BASE + ((idx) * 2))
106#define VOLTAGE_MAX_REG(idx) (REG_VOLTAGE_MAX_BASE + ((idx) * 2))
107
108#define TEMP_REG(idx) (REG_TEMP_BASE + (idx))
109#define TEMP_MIN_REG(idx) (REG_TEMP_MIN_BASE + ((idx) * 2))
110#define TEMP_MAX_REG(idx) (REG_TEMP_MAX_BASE + ((idx) * 2))
111#define TEMP_TMIN_REG(idx) (REG_TEMP_TMIN_BASE + (idx))
112#define TEMP_THERM_REG(idx) (REG_TEMP_THERM_BASE + (idx))
113#define TEMP_OFFSET_REG(idx) (REG_TEMP_OFFSET_BASE + (idx))
114#define TEMP_TRANGE_REG(idx) (REG_TEMP_TRANGE_BASE + (idx))
115
116static unsigned short normal_i2c[] = { 0x2e, I2C_CLIENT_END };
117
118I2C_CLIENT_INSMOD_1(adt7475);
119
120static const struct i2c_device_id adt7475_id[] = {
121 { "adt7475", adt7475 },
122 { }
123};
124MODULE_DEVICE_TABLE(i2c, adt7475_id);
125
126struct adt7475_data {
127 struct device *hwmon_dev;
128 struct mutex lock;
129
130 unsigned long measure_updated;
131 unsigned long limits_updated;
132 char valid;
133
134 u8 config5;
135 u16 alarms;
136 u16 voltage[3][3];
137 u16 temp[7][3];
138 u16 tach[2][4];
139 u8 pwm[4][3];
140 u8 range[3];
141 u8 pwmctl[3];
142 u8 pwmchan[3];
143};
144
145static struct i2c_driver adt7475_driver;
146static struct adt7475_data *adt7475_update_device(struct device *dev);
147static void adt7475_read_hystersis(struct i2c_client *client);
148static void adt7475_read_pwm(struct i2c_client *client, int index);
149
150/* Given a temp value, convert it to register value */
151
152static inline u16 temp2reg(struct adt7475_data *data, long val)
153{
154 u16 ret;
155
156 if (!(data->config5 & CONFIG5_TWOSCOMP)) {
157 val = SENSORS_LIMIT(val, -64000, 191000);
158 ret = (val + 64500) / 1000;
159 } else {
160 val = SENSORS_LIMIT(val, -128000, 127000);
161 if (val < -500)
162 ret = (256500 + val) / 1000;
163 else
164 ret = (val + 500) / 1000;
165 }
166
167 return ret << 2;
168}
169
170/* Given a register value, convert it to a real temp value */
171
172static inline int reg2temp(struct adt7475_data *data, u16 reg)
173{
174 if (data->config5 & CONFIG5_TWOSCOMP) {
175 if (reg >= 512)
176 return (reg - 1024) * 250;
177 else
178 return reg * 250;
179 } else
180 return (reg - 256) * 250;
181}
182
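/*
 * Standalone user-space sketch, separate from the driver code around it:
 * a quick check of the two encodings handled by temp2reg()/reg2temp() above.
 * The 10-bit register value is read either as offset-64 (the default) or as
 * two's complement when CONFIG5_TWOSCOMP is set, at 250 millidegrees per LSB.
 */
#include <stdio.h>

static int demo_reg2temp(int twoscomp, unsigned int reg)
{
        if (twoscomp)
                return (reg >= 512 ? (int)reg - 1024 : (int)reg) * 250;
        return ((int)reg - 256) * 250;
}

int main(void)
{
        /* the same register value means very different temperatures */
        printf("offset-64:   %d mC\n", demo_reg2temp(0, 992));  /* 184000 */
        printf("two's comp:  %d mC\n", demo_reg2temp(1, 992));  /* -8000  */
        return 0;
}
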
183static inline int tach2rpm(u16 tach)
184{
185 if (tach == 0 || tach == 0xFFFF)
186 return 0;
187
188 return (90000 * 60) / tach;
189}
190
191static inline u16 rpm2tach(unsigned long rpm)
192{
193 if (rpm == 0)
194 return 0;
195
196 return SENSORS_LIMIT((90000 * 60) / rpm, 1, 0xFFFF);
197}
198
199static inline int reg2vcc(u16 reg)
200{
201 return (4296 * reg) / 1000;
202}
203
204static inline int reg2vccp(u16 reg)
205{
206 return (2929 * reg) / 1000;
207}
208
209static inline u16 vcc2reg(long vcc)
210{
211 vcc = SENSORS_LIMIT(vcc, 0, 4396);
212 return (vcc * 1000) / 4296;
213}
214
215static inline u16 vccp2reg(long vcc)
216{
217 vcc = SENSORS_LIMIT(vcc, 0, 2998);
218 return (vcc * 1000) / 2929;
219}
220
221static u16 adt7475_read_word(struct i2c_client *client, int reg)
222{
223 u16 val;
224
225 val = i2c_smbus_read_byte_data(client, reg);
226 val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
227
228 return val;
229}
230
231static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
232{
233 i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
234 i2c_smbus_write_byte_data(client, reg, val & 0xFF);
235}
236
237/* Find the nearest value in a table - used for pwm frequency and
238 auto temp range */
239static int find_nearest(long val, const int *array, int size)
240{
241 int i;
242
243 if (val < array[0])
244 return 0;
245
246 if (val > array[size - 1])
247 return size - 1;
248
249 for (i = 0; i < size - 1; i++) {
250 int a, b;
251
252 if (val > array[i + 1])
253 continue;
254
255 a = val - array[i];
256 b = array[i + 1] - val;
257
258 return (a <= b) ? i : i + 1;
259 }
260
261 return 0;
262}
263
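/*
 * Standalone user-space sketch, separate from the driver code around it:
 * how find_nearest() above maps a user-supplied value onto the closest entry
 * of a fixed table (the first entries of autorange_table are reused here);
 * it is the resulting index, not the value, that the driver writes into the
 * range register.
 */
#include <stdio.h>

static const int demo_table[] = { 2000, 2500, 3330, 4000, 5000 };

static int demo_find_nearest(long val, const int *array, int size)
{
        int i;

        if (val < array[0])
                return 0;
        if (val > array[size - 1])
                return size - 1;

        for (i = 0; i < size - 1; i++) {
                if (val > array[i + 1])
                        continue;
                return (val - array[i] <= array[i + 1] - val) ? i : i + 1;
        }
        return 0;
}

int main(void)
{
        int idx = demo_find_nearest(3500, demo_table, 5);

        printf("3500 maps to index %d (%d)\n", idx, demo_table[idx]);   /* index 2, 3330 */
        return 0;
}
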
264static ssize_t show_voltage(struct device *dev, struct device_attribute *attr,
265 char *buf)
266{
267 struct adt7475_data *data = adt7475_update_device(dev);
268 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
269 unsigned short val;
270
271 switch (sattr->nr) {
272 case ALARM:
273 return sprintf(buf, "%d\n",
274 (data->alarms >> (sattr->index + 1)) & 1);
275 default:
276 val = data->voltage[sattr->nr][sattr->index];
277 return sprintf(buf, "%d\n",
278 sattr->index ==
279 0 ? reg2vccp(val) : reg2vcc(val));
280 }
281}
282
283static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
284 const char *buf, size_t count)
285{
286
287 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
288 struct i2c_client *client = to_i2c_client(dev);
289 struct adt7475_data *data = i2c_get_clientdata(client);
290 unsigned char reg;
291 long val;
292
293 if (strict_strtol(buf, 10, &val))
294 return -EINVAL;
295
296 mutex_lock(&data->lock);
297
298 data->voltage[sattr->nr][sattr->index] =
299 sattr->index ? vcc2reg(val) : vccp2reg(val);
300
301 if (sattr->nr == MIN)
302 reg = VOLTAGE_MIN_REG(sattr->index);
303 else
304 reg = VOLTAGE_MAX_REG(sattr->index);
305
306 i2c_smbus_write_byte_data(client, reg,
307 data->voltage[sattr->nr][sattr->index] >> 2);
308 mutex_unlock(&data->lock);
309
310 return count;
311}
312
313static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
314 char *buf)
315{
316 struct adt7475_data *data = adt7475_update_device(dev);
317 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
318 int out;
319
320 switch (sattr->nr) {
321 case HYSTERSIS:
322 mutex_lock(&data->lock);
323 out = data->temp[sattr->nr][sattr->index];
324 if (sattr->index != 1)
325 out = (out >> 4) & 0xF;
326 else
327 out = (out & 0xF);
328 /* Show the value as an absolute number tied to
329 * THERM */
330 out = reg2temp(data, data->temp[THERM][sattr->index]) -
331 out * 1000;
332 mutex_unlock(&data->lock);
333 break;
334
335 case OFFSET:
336 /* Offset is always 2's complement, regardless of the
337 * setting in CONFIG5 */
338 mutex_lock(&data->lock);
339 out = (s8)data->temp[sattr->nr][sattr->index];
340 if (data->config5 & CONFIG5_TEMPOFFSET)
341 out *= 1000;
342 else
343 out *= 500;
344 mutex_unlock(&data->lock);
345 break;
346
347 case ALARM:
348 out = (data->alarms >> (sattr->index + 4)) & 1;
349 break;
350
351 case FAULT:
352 /* Note - only for remote1 and remote2 */
353 out = data->alarms & (sattr->index ? 0x8000 : 0x4000);
354 out = out ? 0 : 1;
355 break;
356
357 default:
358 /* All other temp values are in the configured format */
359 out = reg2temp(data, data->temp[sattr->nr][sattr->index]);
360 }
361
362 return sprintf(buf, "%d\n", out);
363}
364
365static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
366 const char *buf, size_t count)
367{
368 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
369 struct i2c_client *client = to_i2c_client(dev);
370 struct adt7475_data *data = i2c_get_clientdata(client);
371 unsigned char reg = 0;
372 u8 out;
373 int temp;
374 long val;
375
376 if (strict_strtol(buf, 10, &val))
377 return -EINVAL;
378
379 mutex_lock(&data->lock);
380
381 /* We need the config register in all cases for temp <-> reg conv. */
382 data->config5 = adt7475_read(REG_CONFIG5);
383
384 switch (sattr->nr) {
385 case OFFSET:
386 if (data->config5 & CONFIG5_TEMPOFFSET) {
387 val = SENSORS_LIMIT(val, -63000, 127000);
388 out = data->temp[OFFSET][sattr->index] = val / 1000;
389 } else {
390 val = SENSORS_LIMIT(val, -63000, 64000);
391 out = data->temp[OFFSET][sattr->index] = val / 500;
392 }
393 break;
394
395 case HYSTERSIS:
396 /* The value will be given as an absolute value, turn it
397 into an offset based on THERM */
398
399 /* Read fresh THERM and HYSTERSIS values from the chip */
400 data->temp[THERM][sattr->index] =
401 adt7475_read(TEMP_THERM_REG(sattr->index)) << 2;
402 adt7475_read_hystersis(client);
403
404 temp = reg2temp(data, data->temp[THERM][sattr->index]);
405 val = SENSORS_LIMIT(val, temp - 15000, temp);
406 val = (temp - val) / 1000;
407
408 if (sattr->index != 1) {
409 data->temp[HYSTERSIS][sattr->index] &= 0xF0;
410 data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4;
411 } else {
412 data->temp[HYSTERSIS][sattr->index] &= 0x0F;
413 data->temp[HYSTERSIS][sattr->index] |= (val & 0xF);
414 }
415
416 out = data->temp[HYSTERSIS][sattr->index];
417 break;
418
419 default:
420 data->temp[sattr->nr][sattr->index] = temp2reg(data, val);
421
422 /* We maintain an extra 2 digits of precision for simplicity
423 * - shift those back off before writing the value */
424 out = (u8) (data->temp[sattr->nr][sattr->index] >> 2);
425 }
426
427 switch (sattr->nr) {
428 case MIN:
429 reg = TEMP_MIN_REG(sattr->index);
430 break;
431 case MAX:
432 reg = TEMP_MAX_REG(sattr->index);
433 break;
434 case OFFSET:
435 reg = TEMP_OFFSET_REG(sattr->index);
436 break;
437 case AUTOMIN:
438 reg = TEMP_TMIN_REG(sattr->index);
439 break;
440 case THERM:
441 reg = TEMP_THERM_REG(sattr->index);
442 break;
443 case HYSTERSIS:
444 if (sattr->index != 2)
445 reg = REG_REMOTE1_HYSTERSIS;
446 else
447 reg = REG_REMOTE2_HYSTERSIS;
448
449 break;
450 }
451
452 i2c_smbus_write_byte_data(client, reg, out);
453
454 mutex_unlock(&data->lock);
455 return count;
456}
457
458/* Table of autorange values - the user will write the value in millidegrees,
459 and we'll convert it */
460static const int autorange_table[] = {
461 2000, 2500, 3330, 4000, 5000, 6670, 8000,
462 10000, 13330, 16000, 20000, 26670, 32000, 40000,
463 53330, 80000
464};
465
466static ssize_t show_point2(struct device *dev, struct device_attribute *attr,
467 char *buf)
468{
469 struct adt7475_data *data = adt7475_update_device(dev);
470 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
471 int out, val;
472
473 mutex_lock(&data->lock);
474 out = (data->range[sattr->index] >> 4) & 0x0F;
475 val = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
476 mutex_unlock(&data->lock);
477
478 return sprintf(buf, "%d\n", val + autorange_table[out]);
479}
480
481static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
482 const char *buf, size_t count)
483{
484 struct i2c_client *client = to_i2c_client(dev);
485 struct adt7475_data *data = i2c_get_clientdata(client);
486 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
487 int temp;
488 long val;
489
490 if (strict_strtol(buf, 10, &val))
491 return -EINVAL;
492
493 mutex_lock(&data->lock);
494
495 /* Get a fresh copy of the needed registers */
496 data->config5 = adt7475_read(REG_CONFIG5);
497 data->temp[AUTOMIN][sattr->index] =
498 adt7475_read(TEMP_TMIN_REG(sattr->index)) << 2;
499 data->range[sattr->index] =
500 adt7475_read(TEMP_TRANGE_REG(sattr->index));
501
502 /* The user will write an absolute value, so subtract the start point
503 to figure the range */
504 temp = reg2temp(data, data->temp[AUTOMIN][sattr->index]);
505 val = SENSORS_LIMIT(val, temp + autorange_table[0],
506 temp + autorange_table[ARRAY_SIZE(autorange_table) - 1]);
507 val -= temp;
508
509 /* Find the nearest table entry to what the user wrote */
510 val = find_nearest(val, autorange_table, ARRAY_SIZE(autorange_table));
511
512 data->range[sattr->index] &= ~0xF0;
513 data->range[sattr->index] |= val << 4;
514
515 i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
516 data->range[sattr->index]);
517
518 mutex_unlock(&data->lock);
519 return count;
520}
521
522static ssize_t show_tach(struct device *dev, struct device_attribute *attr,
523 char *buf)
524{
525 struct adt7475_data *data = adt7475_update_device(dev);
526 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
527 int out;
528
529 if (sattr->nr == ALARM)
530 out = (data->alarms >> (sattr->index + 10)) & 1;
531 else
532 out = tach2rpm(data->tach[sattr->nr][sattr->index]);
533
534 return sprintf(buf, "%d\n", out);
535}
536
537static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
538 const char *buf, size_t count)
539{
540
541 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
542 struct i2c_client *client = to_i2c_client(dev);
543 struct adt7475_data *data = i2c_get_clientdata(client);
544 unsigned long val;
545
546 if (strict_strtoul(buf, 10, &val))
547 return -EINVAL;
548
549 mutex_lock(&data->lock);
550
551 data->tach[MIN][sattr->index] = rpm2tach(val);
552
553 adt7475_write_word(client, TACH_MIN_REG(sattr->index),
554 data->tach[MIN][sattr->index]);
555
556 mutex_unlock(&data->lock);
557 return count;
558}
559
560static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
561 char *buf)
562{
563 struct adt7475_data *data = adt7475_update_device(dev);
564 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
565
566 return sprintf(buf, "%d\n", data->pwm[sattr->nr][sattr->index]);
567}
568
569static ssize_t show_pwmchan(struct device *dev, struct device_attribute *attr,
570 char *buf)
571{
572 struct adt7475_data *data = adt7475_update_device(dev);
573 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
574
575 return sprintf(buf, "%d\n", data->pwmchan[sattr->index]);
576}
577
578static ssize_t show_pwmctrl(struct device *dev, struct device_attribute *attr,
579 char *buf)
580{
581 struct adt7475_data *data = adt7475_update_device(dev);
582 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
583
584 return sprintf(buf, "%d\n", data->pwmctl[sattr->index]);
585}
586
587static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
588 const char *buf, size_t count)
589{
590
591 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
592 struct i2c_client *client = to_i2c_client(dev);
593 struct adt7475_data *data = i2c_get_clientdata(client);
594 unsigned char reg = 0;
595 long val;
596
597 if (strict_strtol(buf, 10, &val))
598 return -EINVAL;
599
600 mutex_lock(&data->lock);
601
602 switch (sattr->nr) {
603 case INPUT:
604 /* Get a fresh value for CONTROL */
605 data->pwm[CONTROL][sattr->index] =
606 adt7475_read(PWM_CONFIG_REG(sattr->index));
607
608 /* If we are not in manual mode, then we shouldn't allow
609 * the user to set the pwm speed */
610 if (((data->pwm[CONTROL][sattr->index] >> 5) & 7) != 7) {
611 mutex_unlock(&data->lock);
612 return count;
613 }
614
615 reg = PWM_REG(sattr->index);
616 break;
617
618 case MIN:
619 reg = PWM_MIN_REG(sattr->index);
620 break;
621
622 case MAX:
623 reg = PWM_MAX_REG(sattr->index);
624 break;
625 }
626
627 data->pwm[sattr->nr][sattr->index] = SENSORS_LIMIT(val, 0, 0xFF);
628 i2c_smbus_write_byte_data(client, reg,
629 data->pwm[sattr->nr][sattr->index]);
630
631 mutex_unlock(&data->lock);
632
633 return count;
634}
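/*
 * Note: for pwmN itself (the INPUT case above) the write only takes effect
 * in manual mode; if the behaviour field selects anything else the write is
 * accepted (count is returned) but the duty-cycle register is left alone.
 */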
635
636/* Called by set_pwmctrl and set_pwmchan */
637
638static int hw_set_pwm(struct i2c_client *client, int index,
639 unsigned int pwmctl, unsigned int pwmchan)
640{
641 struct adt7475_data *data = i2c_get_clientdata(client);
642 long val = 0;
643
644 switch (pwmctl) {
645 case 0:
646 val = 0x03; /* Run at full speed */
647 break;
648 case 1:
649 val = 0x07; /* Manual mode */
650 break;
651 case 2:
652 switch (pwmchan) {
653 case 1:
654 /* Remote1 controls PWM */
655 val = 0x00;
656 break;
657 case 2:
658 /* local controls PWM */
659 val = 0x01;
660 break;
661 case 4:
662 /* remote2 controls PWM */
663 val = 0x02;
664 break;
665 case 6:
666 /* local/remote2 control PWM */
667 val = 0x05;
668 break;
669 case 7:
670 /* All three control PWM */
671 val = 0x06;
672 break;
673 default:
674 return -EINVAL;
675 }
676 break;
677 default:
678 return -EINVAL;
679 }
680
681 data->pwmctl[index] = pwmctl;
682 data->pwmchan[index] = pwmchan;
683
684 data->pwm[CONTROL][index] &= ~0xE0;
685 data->pwm[CONTROL][index] |= (val & 7) << 5;
686
687 i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
688 data->pwm[CONTROL][index]);
689
690 return 0;
691}
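/*
 * Usage sketch (illustrative): the sysfs values map onto the behaviour
 * field in bits 7:5 of the PWM configuration register.  For example,
 * writing 2 to pwm1_enable and 6 to pwm1_auto_channel_temp selects
 * automatic mode driven by the local and Remote2 channels, i.e. field
 * value 0x05; writing 1 to pwm1_enable selects manual mode (field 0x07).
 */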
692
693static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
694 const char *buf, size_t count)
695{
696 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
697 struct i2c_client *client = to_i2c_client(dev);
698 struct adt7475_data *data = i2c_get_clientdata(client);
699 int r;
700 long val;
701
702 if (strict_strtol(buf, 10, &val))
703 return -EINVAL;
704
705 mutex_lock(&data->lock);
706 /* Read Modify Write PWM values */
707 adt7475_read_pwm(client, sattr->index);
708 r = hw_set_pwm(client, sattr->index, data->pwmctl[sattr->index], val);
709 if (r)
710 count = r;
711 mutex_unlock(&data->lock);
712
713 return count;
714}
715
716static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
717 const char *buf, size_t count)
718{
719 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
720 struct i2c_client *client = to_i2c_client(dev);
721 struct adt7475_data *data = i2c_get_clientdata(client);
722 int r;
723 long val;
724
725 if (strict_strtol(buf, 10, &val))
726 return -EINVAL;
727
728 mutex_lock(&data->lock);
729 /* Read Modify Write PWM values */
730 adt7475_read_pwm(client, sattr->index);
731 r = hw_set_pwm(client, sattr->index, val, data->pwmchan[sattr->index]);
732 if (r)
733 count = r;
734 mutex_unlock(&data->lock);
735
736 return count;
737}
738
739/* List of frequencies for the PWM */
740static const int pwmfreq_table[] = {
741 11, 14, 22, 29, 35, 44, 58, 88
742};
743
744static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
745 char *buf)
746{
747 struct adt7475_data *data = adt7475_update_device(dev);
748 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
749
750 return sprintf(buf, "%d\n",
751 pwmfreq_table[data->range[sattr->index] & 7]);
752}
753
754static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
755 const char *buf, size_t count)
756{
757 struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
758 struct i2c_client *client = to_i2c_client(dev);
759 struct adt7475_data *data = i2c_get_clientdata(client);
760 int out;
761 long val;
762
763 if (strict_strtol(buf, 10, &val))
764 return -EINVAL;
765
766 out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
767
768 mutex_lock(&data->lock);
769
770 data->range[sattr->index] =
771 adt7475_read(TEMP_TRANGE_REG(sattr->index));
772 data->range[sattr->index] &= ~7;
773 data->range[sattr->index] |= out;
774
775 i2c_smbus_write_byte_data(client, TEMP_TRANGE_REG(sattr->index),
776 data->range[sattr->index]);
777
778 mutex_unlock(&data->lock);
779 return count;
780}
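/*
 * Worked example (illustrative): writing 25 to pwmN_freq makes
 * find_nearest() choose pwmfreq_table[2] = 22 Hz (22 is closer to 25 than
 * 29 is), so 2 is stored in the low three bits of the TRANGE register and
 * a subsequent read of pwmN_freq reports 22.
 */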
781
782static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_voltage, NULL, INPUT, 0);
783static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_voltage,
784 set_voltage, MAX, 0);
785static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_voltage,
786 set_voltage, MIN, 0);
787static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, show_voltage, NULL, ALARM, 0);
788static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_voltage, NULL, INPUT, 1);
789static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_voltage,
790 set_voltage, MAX, 1);
791static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_voltage,
792 set_voltage, MIN, 1);
793static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, show_voltage, NULL, ALARM, 1);
794static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, INPUT, 0);
795static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, show_temp, NULL, ALARM, 0);
796static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, show_temp, NULL, FAULT, 0);
797static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
798 MAX, 0);
799static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
800 MIN, 0);
801static SENSOR_DEVICE_ATTR_2(temp1_offset, S_IRUGO | S_IWUSR, show_temp,
802 set_temp, OFFSET, 0);
803static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
804 show_temp, set_temp, AUTOMIN, 0);
805static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IRUGO | S_IWUSR,
806 show_point2, set_point2, 0, 0);
807static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
808 THERM, 0);
809static SENSOR_DEVICE_ATTR_2(temp1_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
810 set_temp, HYSTERSIS, 0);
811static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, INPUT, 1);
812static SENSOR_DEVICE_ATTR_2(temp2_alarm, S_IRUGO, show_temp, NULL, ALARM, 1);
813static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
814 MAX, 1);
815static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
816 MIN, 1);
817static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IRUGO | S_IWUSR, show_temp,
818 set_temp, OFFSET, 1);
819static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IRUGO | S_IWUSR,
820 show_temp, set_temp, AUTOMIN, 1);
821static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IRUGO | S_IWUSR,
822 show_point2, set_point2, 0, 1);
823static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
824 THERM, 1);
825static SENSOR_DEVICE_ATTR_2(temp2_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
826 set_temp, HYSTERSIS, 1);
827static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, INPUT, 2);
828static SENSOR_DEVICE_ATTR_2(temp3_alarm, S_IRUGO, show_temp, NULL, ALARM, 2);
829static SENSOR_DEVICE_ATTR_2(temp3_fault, S_IRUGO, show_temp, NULL, FAULT, 2);
830static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
831 MAX, 2);
832static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp,
833 MIN, 2);
834static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
835 set_temp, OFFSET, 2);
836static SENSOR_DEVICE_ATTR_2(temp3_auto_point1_temp, S_IRUGO | S_IWUSR,
837 show_temp, set_temp, AUTOMIN, 2);
838static SENSOR_DEVICE_ATTR_2(temp3_auto_point2_temp, S_IRUGO | S_IWUSR,
839 show_point2, set_point2, 0, 2);
840static SENSOR_DEVICE_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, set_temp,
841 THERM, 2);
842static SENSOR_DEVICE_ATTR_2(temp3_crit_hyst, S_IRUGO | S_IWUSR, show_temp,
843 set_temp, HYSTERSIS, 2);
844static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_tach, NULL, INPUT, 0);
845static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
846 MIN, 0);
847static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, show_tach, NULL, ALARM, 0);
848static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_tach, NULL, INPUT, 1);
849static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
850 MIN, 1);
851static SENSOR_DEVICE_ATTR_2(fan2_alarm, S_IRUGO, show_tach, NULL, ALARM, 1);
852static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_tach, NULL, INPUT, 2);
853static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
854 MIN, 2);
855static SENSOR_DEVICE_ATTR_2(fan3_alarm, S_IRUGO, show_tach, NULL, ALARM, 2);
856static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_tach, NULL, INPUT, 3);
857static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_tach, set_tach,
858 MIN, 3);
859static SENSOR_DEVICE_ATTR_2(fan4_alarm, S_IRUGO, show_tach, NULL, ALARM, 3);
860static SENSOR_DEVICE_ATTR_2(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
861 0);
862static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
863 set_pwmfreq, INPUT, 0);
864static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
865 set_pwmctrl, INPUT, 0);
866static SENSOR_DEVICE_ATTR_2(pwm1_auto_channel_temp, S_IRUGO | S_IWUSR,
867 show_pwmchan, set_pwmchan, INPUT, 0);
868static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
869 set_pwm, MIN, 0);
870static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
871 set_pwm, MAX, 0);
872static SENSOR_DEVICE_ATTR_2(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
873 1);
874static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
875 set_pwmfreq, INPUT, 1);
876static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
877 set_pwmctrl, INPUT, 1);
878static SENSOR_DEVICE_ATTR_2(pwm2_auto_channel_temp, S_IRUGO | S_IWUSR,
879 show_pwmchan, set_pwmchan, INPUT, 1);
880static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
881 set_pwm, MIN, 1);
882static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
883 set_pwm, MAX, 1);
884static SENSOR_DEVICE_ATTR_2(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, INPUT,
885 2);
886static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
887 set_pwmfreq, INPUT, 2);
888static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
889 set_pwmctrl, INPUT, 2);
890static SENSOR_DEVICE_ATTR_2(pwm3_auto_channel_temp, S_IRUGO | S_IWUSR,
891 show_pwmchan, set_pwmchan, INPUT, 2);
892static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
893 set_pwm, MIN, 2);
894static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR, show_pwm,
895 set_pwm, MAX, 2);
896
897static struct attribute *adt7475_attrs[] = {
898 &sensor_dev_attr_in1_input.dev_attr.attr,
899 &sensor_dev_attr_in1_max.dev_attr.attr,
900 &sensor_dev_attr_in1_min.dev_attr.attr,
901 &sensor_dev_attr_in1_alarm.dev_attr.attr,
902 &sensor_dev_attr_in2_input.dev_attr.attr,
903 &sensor_dev_attr_in2_max.dev_attr.attr,
904 &sensor_dev_attr_in2_min.dev_attr.attr,
905 &sensor_dev_attr_in2_alarm.dev_attr.attr,
906 &sensor_dev_attr_temp1_input.dev_attr.attr,
907 &sensor_dev_attr_temp1_alarm.dev_attr.attr,
908 &sensor_dev_attr_temp1_fault.dev_attr.attr,
909 &sensor_dev_attr_temp1_max.dev_attr.attr,
910 &sensor_dev_attr_temp1_min.dev_attr.attr,
911 &sensor_dev_attr_temp1_offset.dev_attr.attr,
912 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
913 &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
914 &sensor_dev_attr_temp1_crit.dev_attr.attr,
915 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
916 &sensor_dev_attr_temp2_input.dev_attr.attr,
917 &sensor_dev_attr_temp2_alarm.dev_attr.attr,
918 &sensor_dev_attr_temp2_max.dev_attr.attr,
919 &sensor_dev_attr_temp2_min.dev_attr.attr,
920 &sensor_dev_attr_temp2_offset.dev_attr.attr,
921 &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
922 &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
923 &sensor_dev_attr_temp2_crit.dev_attr.attr,
924 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
925 &sensor_dev_attr_temp3_input.dev_attr.attr,
926 &sensor_dev_attr_temp3_fault.dev_attr.attr,
927 &sensor_dev_attr_temp3_alarm.dev_attr.attr,
928 &sensor_dev_attr_temp3_max.dev_attr.attr,
929 &sensor_dev_attr_temp3_min.dev_attr.attr,
930 &sensor_dev_attr_temp3_offset.dev_attr.attr,
931 &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
932 &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
933 &sensor_dev_attr_temp3_crit.dev_attr.attr,
934 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
935 &sensor_dev_attr_fan1_input.dev_attr.attr,
936 &sensor_dev_attr_fan1_min.dev_attr.attr,
937 &sensor_dev_attr_fan1_alarm.dev_attr.attr,
938 &sensor_dev_attr_fan2_input.dev_attr.attr,
939 &sensor_dev_attr_fan2_min.dev_attr.attr,
940 &sensor_dev_attr_fan2_alarm.dev_attr.attr,
941 &sensor_dev_attr_fan3_input.dev_attr.attr,
942 &sensor_dev_attr_fan3_min.dev_attr.attr,
943 &sensor_dev_attr_fan3_alarm.dev_attr.attr,
944 &sensor_dev_attr_fan4_input.dev_attr.attr,
945 &sensor_dev_attr_fan4_min.dev_attr.attr,
946 &sensor_dev_attr_fan4_alarm.dev_attr.attr,
947 &sensor_dev_attr_pwm1.dev_attr.attr,
948 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
949 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
950 &sensor_dev_attr_pwm1_auto_channel_temp.dev_attr.attr,
951 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
952 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
953 &sensor_dev_attr_pwm2.dev_attr.attr,
954 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
955 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
956 &sensor_dev_attr_pwm2_auto_channel_temp.dev_attr.attr,
957 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
958 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
959 &sensor_dev_attr_pwm3.dev_attr.attr,
960 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
961 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
962 &sensor_dev_attr_pwm3_auto_channel_temp.dev_attr.attr,
963 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
964 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
965 NULL,
966};
967
968struct attribute_group adt7475_attr_group = { .attrs = adt7475_attrs };
969
970static int adt7475_detect(struct i2c_client *client, int kind,
971 struct i2c_board_info *info)
972{
973 struct i2c_adapter *adapter = client->adapter;
974
975 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
976 return -ENODEV;
977
978 if (kind <= 0) {
979 if (adt7475_read(REG_VENDID) != 0x41 ||
980 adt7475_read(REG_DEVID) != 0x75) {
981 dev_err(&adapter->dev,
982 "Couldn't detect a adt7475 part at 0x%02x\n",
983 (unsigned int)client->addr);
984 return -ENODEV;
985 }
986 }
987
988 strlcpy(info->type, adt7475_id[0].name, I2C_NAME_SIZE);
989
990 return 0;
991}
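/*
 * Note: 0x41 is the Analog Devices vendor ID and 0x75 the ADT7475 device
 * ID; anything else at this address is rejected during legacy detection.
 */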
992
993static int adt7475_probe(struct i2c_client *client,
994 const struct i2c_device_id *id)
995{
996 struct adt7475_data *data;
997 int i, ret = 0;
998
999 data = kzalloc(sizeof(*data), GFP_KERNEL);
1000 if (data == NULL)
1001 return -ENOMEM;
1002
1003 mutex_init(&data->lock);
1004 i2c_set_clientdata(client, data);
1005
1006 /* Call adt7475_read_pwm for all PWMs, as this will reprogram any
1007 PWMs that are disabled, switching them to manual mode with a 0% duty cycle */
1008 for (i = 0; i < ADT7475_PWM_COUNT; i++)
1009 adt7475_read_pwm(client, i);
1010
1011 ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
1012 if (ret)
1013 goto efree;
1014
1015 data->hwmon_dev = hwmon_device_register(&client->dev);
1016 if (IS_ERR(data->hwmon_dev)) {
1017 ret = PTR_ERR(data->hwmon_dev);
1018 goto eremove;
1019 }
1020
1021 return 0;
1022
1023eremove:
1024 sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
1025efree:
1026 kfree(data);
1027 return ret;
1028}
1029
1030static int adt7475_remove(struct i2c_client *client)
1031{
1032 struct adt7475_data *data = i2c_get_clientdata(client);
1033
1034 hwmon_device_unregister(data->hwmon_dev);
1035 sysfs_remove_group(&client->dev.kobj, &adt7475_attr_group);
1036 kfree(data);
1037
1038 return 0;
1039}
1040
1041static struct i2c_driver adt7475_driver = {
1042 .class = I2C_CLASS_HWMON,
1043 .driver = {
1044 .name = "adt7475",
1045 },
1046 .probe = adt7475_probe,
1047 .remove = adt7475_remove,
1048 .id_table = adt7475_id,
1049 .detect = adt7475_detect,
1050 .address_data = &addr_data,
1051};
1052
1053static void adt7475_read_hystersis(struct i2c_client *client)
1054{
1055 struct adt7475_data *data = i2c_get_clientdata(client);
1056
1057 data->temp[HYSTERSIS][0] = (u16) adt7475_read(REG_REMOTE1_HYSTERSIS);
1058 data->temp[HYSTERSIS][1] = data->temp[HYSTERSIS][0];
1059 data->temp[HYSTERSIS][2] = (u16) adt7475_read(REG_REMOTE2_HYSTERSIS);
1060}
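/*
 * Note: the local and Remote1 hysteresis values share one register
 * (REG_REMOTE1_HYSTERSIS), which is why indices 0 and 1 are filled from
 * the same read here; Remote2 has its own register.  set_temp() updates
 * only the relevant nibble when one of the shared values is written.
 */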
1061
1062static void adt7475_read_pwm(struct i2c_client *client, int index)
1063{
1064 struct adt7475_data *data = i2c_get_clientdata(client);
1065 unsigned int v;
1066
1067 data->pwm[CONTROL][index] = adt7475_read(PWM_CONFIG_REG(index));
1068
1069 /* Figure out the internal value for pwmctrl and pwmchan
1070 based on the current settings */
1071 v = (data->pwm[CONTROL][index] >> 5) & 7;
1072
1073 if (v == 3)
1074 data->pwmctl[index] = 0;
1075 else if (v == 7)
1076 data->pwmctl[index] = 1;
1077 else if (v == 4) {
1078 /* The fan is disabled - we don't want to
1079 support that, so change to manual mode and
1080 set the duty cycle to 0 instead
1081 */
1082 data->pwm[INPUT][index] = 0;
1083 data->pwm[CONTROL][index] &= ~0xE0;
1084 data->pwm[CONTROL][index] |= (7 << 5);
1085
1086 i2c_smbus_write_byte_data(client, PWM_REG(index),
1087 data->pwm[INPUT][index]);
1088
1089 i2c_smbus_write_byte_data(client, PWM_CONFIG_REG(index),
1090 data->pwm[CONTROL][index]);
1091
1092 data->pwmctl[index] = 1;
1093 } else {
1094 data->pwmctl[index] = 2;
1095
1096 switch (v) {
1097 case 0:
1098 data->pwmchan[index] = 1;
1099 break;
1100 case 1:
1101 data->pwmchan[index] = 2;
1102 break;
1103 case 2:
1104 data->pwmchan[index] = 4;
1105 break;
1106 case 5:
1107 data->pwmchan[index] = 6;
1108 break;
1109 case 6:
1110 data->pwmchan[index] = 7;
1111 break;
1112 }
1113 }
1114}
1115
1116static struct adt7475_data *adt7475_update_device(struct device *dev)
1117{
1118 struct i2c_client *client = to_i2c_client(dev);
1119 struct adt7475_data *data = i2c_get_clientdata(client);
1120 u8 ext;
1121 int i;
1122
1123 mutex_lock(&data->lock);
1124
1125 /* Measurement values update every 2 seconds */
1126 if (time_after(jiffies, data->measure_updated + HZ * 2) ||
1127 !data->valid) {
1128 data->alarms = adt7475_read(REG_STATUS2) << 8;
1129 data->alarms |= adt7475_read(REG_STATUS1);
1130
1131 ext = adt7475_read(REG_EXTEND1);
1132 for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++)
1133 data->voltage[INPUT][i] =
1134 (adt7475_read(VOLTAGE_REG(i)) << 2) |
1135 ((ext >> ((i + 1) * 2)) & 3);
1136
1137 ext = adt7475_read(REG_EXTEND2);
1138 for (i = 0; i < ADT7475_TEMP_COUNT; i++)
1139 data->temp[INPUT][i] =
1140 (adt7475_read(TEMP_REG(i)) << 2) |
1141 ((ext >> ((i + 1) * 2)) & 3);
1142
1143 for (i = 0; i < ADT7475_TACH_COUNT; i++)
1144 data->tach[INPUT][i] =
1145 adt7475_read_word(client, TACH_REG(i));
1146
1147 /* Updated by hw when in auto mode */
1148 for (i = 0; i < ADT7475_PWM_COUNT; i++)
1149 data->pwm[INPUT][i] = adt7475_read(PWM_REG(i));
1150
1151 data->measure_updated = jiffies;
1152 }
1153
1154 /* Limits and settings rarely change - update them at most every 60 seconds */
1155 if (time_after(jiffies, data->limits_updated + HZ * 60) ||
1156 !data->valid) {
1157 data->config5 = adt7475_read(REG_CONFIG5);
1158
1159 for (i = 0; i < ADT7475_VOLTAGE_COUNT; i++) {
1160 /* Adjust values so they match the input precision */
1161 data->voltage[MIN][i] =
1162 adt7475_read(VOLTAGE_MIN_REG(i)) << 2;
1163 data->voltage[MAX][i] =
1164 adt7475_read(VOLTAGE_MAX_REG(i)) << 2;
1165 }
1166
1167 for (i = 0; i < ADT7475_TEMP_COUNT; i++) {
1168 /* Adjust values so they match the input precision */
1169 data->temp[MIN][i] =
1170 adt7475_read(TEMP_MIN_REG(i)) << 2;
1171 data->temp[MAX][i] =
1172 adt7475_read(TEMP_MAX_REG(i)) << 2;
1173 data->temp[AUTOMIN][i] =
1174 adt7475_read(TEMP_TMIN_REG(i)) << 2;
1175 data->temp[THERM][i] =
1176 adt7475_read(TEMP_THERM_REG(i)) << 2;
1177 data->temp[OFFSET][i] =
1178 adt7475_read(TEMP_OFFSET_REG(i));
1179 }
1180 adt7475_read_hystersis(client);
1181
1182 for (i = 0; i < ADT7475_TACH_COUNT; i++)
1183 data->tach[MIN][i] =
1184 adt7475_read_word(client, TACH_MIN_REG(i));
1185
1186 for (i = 0; i < ADT7475_PWM_COUNT; i++) {
1187 data->pwm[MAX][i] = adt7475_read(PWM_MAX_REG(i));
1188 data->pwm[MIN][i] = adt7475_read(PWM_MIN_REG(i));
1189 /* Set the channel and control information */
1190 adt7475_read_pwm(client, i);
1191 }
1192
1193 data->range[0] = adt7475_read(TEMP_TRANGE_REG(0));
1194 data->range[1] = adt7475_read(TEMP_TRANGE_REG(1));
1195 data->range[2] = adt7475_read(TEMP_TRANGE_REG(2));
1196
1197 data->limits_updated = jiffies;
1198 data->valid = 1;
1199 }
1200
1201 mutex_unlock(&data->lock);
1202
1203 return data;
1204}
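/*
 * Note on the refresh scheme above: measurement registers (alarms,
 * voltages, temperatures, tach readings and current duty cycles) are
 * re-read at most every 2 seconds, while limit and configuration
 * registers are refreshed on a slower cadence; the first call after
 * probe (data->valid == 0) reads everything.
 */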
1205
1206static int __init sensors_adt7475_init(void)
1207{
1208 return i2c_add_driver(&adt7475_driver);
1209}
1210
1211static void __exit sensors_adt7475_exit(void)
1212{
1213 i2c_del_driver(&adt7475_driver);
1214}
1215
1216MODULE_AUTHOR("Advanced Micro Devices, Inc");
1217MODULE_DESCRIPTION("adt7475 driver");
1218MODULE_LICENSE("GPL");
1219
1220module_init(sensors_adt7475_init);
1221module_exit(sensors_adt7475_exit);
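/*
 * Usage sketch (illustrative, not part of the driver): besides the legacy
 * address autodetection above, a platform can declare the chip explicitly
 * in its board setup code.  Bus number and address below are examples only:
 *
 *	static struct i2c_board_info board_i2c_devs[] __initdata = {
 *		{ I2C_BOARD_INFO("adt7475", 0x2e) },
 *	};
 *	...
 *	i2c_register_board_info(0, board_i2c_devs, ARRAY_SIZE(board_i2c_devs));
 */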
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index dca47a591baf..e30186236588 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -590,6 +590,11 @@ static ssize_t applesmc_light_show(struct device *dev,
 	}
 
 	ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length);
+	/* newer macbooks report a single 10-bit bigendian value */
+	if (data_length == 10) {
+		left = be16_to_cpu(*(__be16 *)(buffer + 6)) >> 2;
+		goto out;
+	}
 	left = buffer[2];
 	if (ret)
 		goto out;
diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c
index bf8d40580577..03705240000f 100644
--- a/drivers/hwmon/hp_accel.c
+++ b/drivers/hwmon/hp_accel.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007-2008 Yan Burman 4 * Copyright (C) 2007-2008 Yan Burman
5 * Copyright (C) 2008 Eric Piel 5 * Copyright (C) 2008 Eric Piel
6 * Copyright (C) 2008 Pavel Machek 6 * Copyright (C) 2008-2009 Pavel Machek
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -36,6 +36,7 @@
36#include <linux/freezer.h> 36#include <linux/freezer.h>
37#include <linux/version.h> 37#include <linux/version.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39#include <linux/leds.h>
39#include <acpi/acpi_drivers.h> 40#include <acpi/acpi_drivers.h>
40#include <asm/atomic.h> 41#include <asm/atomic.h>
41#include "lis3lv02d.h" 42#include "lis3lv02d.h"
@@ -43,6 +44,36 @@
43#define DRIVER_NAME "lis3lv02d" 44#define DRIVER_NAME "lis3lv02d"
44#define ACPI_MDPS_CLASS "accelerometer" 45#define ACPI_MDPS_CLASS "accelerometer"
45 46
47/* Delayed LEDs infrastructure ------------------------------------ */
48
49/* Special LED class that can defer work */
50struct delayed_led_classdev {
51 struct led_classdev led_classdev;
52 struct work_struct work;
53 enum led_brightness new_brightness;
54
55 unsigned int led; /* For driver */
56 void (*set_brightness)(struct delayed_led_classdev *data, enum led_brightness value);
57};
58
59static inline void delayed_set_status_worker(struct work_struct *work)
60{
61 struct delayed_led_classdev *data =
62 container_of(work, struct delayed_led_classdev, work);
63
64 data->set_brightness(data, data->new_brightness);
65}
66
67static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
68 enum led_brightness brightness)
69{
70 struct delayed_led_classdev *data = container_of(led_cdev,
71 struct delayed_led_classdev, led_classdev);
72 data->new_brightness = brightness;
73 schedule_work(&data->work);
74}
75
76/* HP-specific accelerometer driver ------------------------------------ */
46 77
47/* For automatic insertion of the module */ 78/* For automatic insertion of the module */
48static struct acpi_device_id lis3lv02d_device_ids[] = { 79static struct acpi_device_id lis3lv02d_device_ids[] = {
@@ -154,10 +185,33 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
154 */ 185 */
155}; 186};
156 187
188static void hpled_set(struct delayed_led_classdev *led_cdev, enum led_brightness value)
189{
190 acpi_handle handle = adev.device->handle;
191 unsigned long long ret; /* Not used when writing */
192 union acpi_object in_obj[1];
193 struct acpi_object_list args = { 1, in_obj };
194
195 in_obj[0].type = ACPI_TYPE_INTEGER;
196 in_obj[0].integer.value = !!value;
197
198 acpi_evaluate_integer(handle, "ALED", &args, &ret);
199}
200
201static struct delayed_led_classdev hpled_led = {
202 .led_classdev = {
203 .name = "hp::hddprotect",
204 .default_trigger = "none",
205 .brightness_set = delayed_sysfs_set,
206 .flags = LED_CORE_SUSPENDRESUME,
207 },
208 .set_brightness = hpled_set,
209};
157 210
158static int lis3lv02d_add(struct acpi_device *device) 211static int lis3lv02d_add(struct acpi_device *device)
159{ 212{
160 u8 val; 213 u8 val;
214 int ret;
161 215
162 if (!device) 216 if (!device)
163 return -EINVAL; 217 return -EINVAL;
@@ -183,7 +237,19 @@ static int lis3lv02d_add(struct acpi_device *device)
183 adev.ac = lis3lv02d_axis_normal; 237 adev.ac = lis3lv02d_axis_normal;
184 } 238 }
185 239
186 return lis3lv02d_init_device(&adev); 240 INIT_WORK(&hpled_led.work, delayed_set_status_worker);
241 ret = led_classdev_register(NULL, &hpled_led.led_classdev);
242 if (ret)
243 return ret;
244
245 ret = lis3lv02d_init_device(&adev);
246 if (ret) {
247 flush_work(&hpled_led.work);
248 led_classdev_unregister(&hpled_led.led_classdev);
249 return ret;
250 }
251
252 return ret;
187} 253}
188 254
189static int lis3lv02d_remove(struct acpi_device *device, int type) 255static int lis3lv02d_remove(struct acpi_device *device, int type)
@@ -194,6 +260,9 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
194 lis3lv02d_joystick_disable(); 260 lis3lv02d_joystick_disable();
195 lis3lv02d_poweroff(device->handle); 261 lis3lv02d_poweroff(device->handle);
196 262
263 flush_work(&hpled_led.work);
264 led_classdev_unregister(&hpled_led.led_classdev);
265
197 return lis3lv02d_remove_fs(); 266 return lis3lv02d_remove_fs();
198} 267}
199 268
@@ -256,7 +325,7 @@ static void __exit lis3lv02d_exit_module(void)
256 acpi_bus_unregister_driver(&lis3lv02d_driver); 325 acpi_bus_unregister_driver(&lis3lv02d_driver);
257} 326}
258 327
259MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS"); 328MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED.");
260MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); 329MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek");
261MODULE_LICENSE("GPL"); 330MODULE_LICENSE("GPL");
262 331
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index bd2bde0ef95e..1fe995111841 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -31,6 +31,7 @@
31#include <linux/hwmon-sysfs.h> 31#include <linux/hwmon-sysfs.h>
32#include <linux/err.h> 32#include <linux/err.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <asm/processor.h>
34 35
35#define TEMP_FROM_REG(val) (((((val) >> 16) & 0xff) - 49) * 1000) 36#define TEMP_FROM_REG(val) (((((val) >> 16) & 0xff) - 49) * 1000)
36#define REG_TEMP 0xe4 37#define REG_TEMP 0xe4
@@ -47,6 +48,8 @@ struct k8temp_data {
47 /* registers values */ 48 /* registers values */
48 u8 sensorsp; /* sensor presence bits - SEL_CORE & SEL_PLACE */ 49 u8 sensorsp; /* sensor presence bits - SEL_CORE & SEL_PLACE */
49 u32 temp[2][2]; /* core, place */ 50 u32 temp[2][2]; /* core, place */
51 u8 swap_core_select; /* meaning of SEL_CORE is inverted */
52 u32 temp_offset;
50}; 53};
51 54
52static struct k8temp_data *k8temp_update_device(struct device *dev) 55static struct k8temp_data *k8temp_update_device(struct device *dev)
@@ -114,10 +117,15 @@ static ssize_t show_temp(struct device *dev,
 		to_sensor_dev_attr_2(devattr);
 	int core = attr->nr;
 	int place = attr->index;
+	int temp;
 	struct k8temp_data *data = k8temp_update_device(dev);
 
-	return sprintf(buf, "%d\n",
-		       TEMP_FROM_REG(data->temp[core][place]));
+	if (data->swap_core_select)
+		core = core ? 0 : 1;
+
+	temp = TEMP_FROM_REG(data->temp[core][place]) + data->temp_offset;
+
+	return sprintf(buf, "%d\n", temp);
 }
 
 /* core, place */
@@ -141,20 +149,49 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
141 int err; 149 int err;
142 u8 scfg; 150 u8 scfg;
143 u32 temp; 151 u32 temp;
152 u8 model, stepping;
144 struct k8temp_data *data; 153 struct k8temp_data *data;
145 u32 cpuid = cpuid_eax(1);
146
147 /* this feature should be available since SH-C0 core */
148 if ((cpuid == 0xf40) || (cpuid == 0xf50) || (cpuid == 0xf51)) {
149 err = -ENODEV;
150 goto exit;
151 }
152 154
153 if (!(data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL))) { 155 if (!(data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL))) {
154 err = -ENOMEM; 156 err = -ENOMEM;
155 goto exit; 157 goto exit;
156 } 158 }
157 159
160 model = boot_cpu_data.x86_model;
161 stepping = boot_cpu_data.x86_mask;
162
163 switch (boot_cpu_data.x86) {
164 case 0xf:
165 /* feature available since SH-C0, exclude older revisions */
166 if (((model == 4) && (stepping == 0)) ||
167 ((model == 5) && (stepping <= 1))) {
168 err = -ENODEV;
169 goto exit_free;
170 }
171
172 /*
173 * AMD NPT family 0fh, i.e. RevF and RevG:
174 * meaning of SEL_CORE bit is inverted
175 */
176 if (model >= 0x40) {
177 data->swap_core_select = 1;
178 dev_warn(&pdev->dev, "Temperature readouts might be "
179 "wrong - check erratum #141\n");
180 }
181
182 if ((model >= 0x69) &&
183 !(model == 0xc1 || model == 0x6c || model == 0x7c)) {
184 /*
185 * RevG desktop CPUs (i.e. no socket S1G1 parts)
186 * need additional offset, otherwise reported
187 * temperature is below ambient temperature
188 */
189 data->temp_offset = 21000;
190 }
191
192 break;
193 }
194
158 pci_read_config_byte(pdev, REG_TEMP, &scfg); 195 pci_read_config_byte(pdev, REG_TEMP, &scfg);
159 scfg &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */ 196 scfg &= ~(SEL_PLACE | SEL_CORE); /* Select sensor 0, core0 */
160 pci_write_config_byte(pdev, REG_TEMP, scfg); 197 pci_write_config_byte(pdev, REG_TEMP, scfg);
diff --git a/drivers/i2c/busses/i2c-acorn.c b/drivers/i2c/busses/i2c-acorn.c
index 75089febbc13..9fee3ca17344 100644
--- a/drivers/i2c/busses/i2c-acorn.c
+++ b/drivers/i2c/busses/i2c-acorn.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data ioc_data = {
83}; 83};
84 84
85static struct i2c_adapter ioc_ops = { 85static struct i2c_adapter ioc_ops = {
86 .id = I2C_HW_B_IOC,
87 .algo_data = &ioc_data, 86 .algo_data = &ioc_data,
88}; 87};
89 88
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 9cead9b9458e..981e080b32ae 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -476,7 +476,6 @@ static const struct i2c_algorithm smbus_algorithm = {
476 476
477static struct i2c_adapter ali1535_adapter = { 477static struct i2c_adapter ali1535_adapter = {
478 .owner = THIS_MODULE, 478 .owner = THIS_MODULE,
479 .id = I2C_HW_SMBUS_ALI1535,
480 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 479 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
481 .algo = &smbus_algorithm, 480 .algo = &smbus_algorithm,
482}; 481};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index dd9e796fad69..f70f46582c6c 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -386,7 +386,6 @@ static const struct i2c_algorithm ali1563_algorithm = {
386 386
387static struct i2c_adapter ali1563_adapter = { 387static struct i2c_adapter ali1563_adapter = {
388 .owner = THIS_MODULE, 388 .owner = THIS_MODULE,
389 .id = I2C_HW_SMBUS_ALI1563,
390 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 389 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
391 .algo = &ali1563_algorithm, 390 .algo = &ali1563_algorithm,
392}; 391};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 234fdde7d40e..39066dee46e3 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -473,7 +473,6 @@ static const struct i2c_algorithm smbus_algorithm = {
473 473
474static struct i2c_adapter ali15x3_adapter = { 474static struct i2c_adapter ali15x3_adapter = {
475 .owner = THIS_MODULE, 475 .owner = THIS_MODULE,
476 .id = I2C_HW_SMBUS_ALI15X3,
477 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 476 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
478 .algo = &smbus_algorithm, 477 .algo = &smbus_algorithm,
479}; 478};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 36bee5b9c952..220f4a1eee1d 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -298,7 +298,6 @@ static const struct i2c_algorithm smbus_algorithm = {
298 298
299struct i2c_adapter amd756_smbus = { 299struct i2c_adapter amd756_smbus = {
300 .owner = THIS_MODULE, 300 .owner = THIS_MODULE,
301 .id = I2C_HW_SMBUS_AMD756,
302 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 301 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
303 .algo = &smbus_algorithm, 302 .algo = &smbus_algorithm,
304}; 303};
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 3972208876b3..edab51973bf5 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -387,7 +387,6 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
387 smbus->adapter.owner = THIS_MODULE; 387 smbus->adapter.owner = THIS_MODULE;
388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name), 388 snprintf(smbus->adapter.name, sizeof(smbus->adapter.name),
389 "SMBus2 AMD8111 adapter at %04x", smbus->base); 389 "SMBus2 AMD8111 adapter at %04x", smbus->base);
390 smbus->adapter.id = I2C_HW_SMBUS_AMD8111;
391 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 390 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
392 smbus->adapter.algo = &smbus_algorithm; 391 smbus->adapter.algo = &smbus_algorithm;
393 smbus->adapter.algo_data = smbus; 392 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 66a04c2c660f..f78ce523e3db 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -400,7 +400,6 @@ i2c_au1550_probe(struct platform_device *pdev)
400 priv->xfer_timeout = 200; 400 priv->xfer_timeout = 200;
401 priv->ack_timeout = 200; 401 priv->ack_timeout = 200;
402 402
403 priv->adap.id = I2C_HW_AU1550_PSC;
404 priv->adap.nr = pdev->id; 403 priv->adap.nr = pdev->id;
405 priv->adap.algo = &au1550_algo; 404 priv->adap.algo = &au1550_algo;
406 priv->adap.algo_data = priv; 405 priv->adap.algo_data = priv;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3fd2c417c1e0..fc548b3d002e 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -651,7 +651,6 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
651 iface->timeout_timer.data = (unsigned long)iface; 651 iface->timeout_timer.data = (unsigned long)iface;
652 652
653 p_adap = &iface->adap; 653 p_adap = &iface->adap;
654 p_adap->id = I2C_HW_BLACKFIN;
655 p_adap->nr = pdev->id; 654 p_adap->nr = pdev->id;
656 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name)); 655 strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
657 p_adap->algo = &bfin_twi_algorithm; 656 p_adap->algo = &bfin_twi_algorithm;
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 0ed3ccb81b63..448b4bf35eb7 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -202,7 +202,6 @@ static struct i2c_algo_pcf_data pcf_isa_data = {
202static struct i2c_adapter pcf_isa_ops = { 202static struct i2c_adapter pcf_isa_ops = {
203 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 204 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
205 .id = I2C_HW_P_ELEK,
206 .algo_data = &pcf_isa_data, 205 .algo_data = &pcf_isa_data,
207 .name = "i2c-elektor", 206 .name = "i2c-elektor",
208}; 207};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 648aa7baff83..bec9b845dd16 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -102,7 +102,6 @@ static struct i2c_algo_bit_data hydra_bit_data = {
102static struct i2c_adapter hydra_adap = { 102static struct i2c_adapter hydra_adap = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .name = "Hydra i2c", 104 .name = "Hydra i2c",
105 .id = I2C_HW_B_HYDRA,
106 .algo_data = &hydra_bit_data, 105 .algo_data = &hydra_bit_data,
107}; 106};
108 107
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 526625eaa84b..230238df56c4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -556,7 +556,6 @@ static const struct i2c_algorithm smbus_algorithm = {
556 556
557static struct i2c_adapter i801_adapter = { 557static struct i2c_adapter i801_adapter = {
558 .owner = THIS_MODULE, 558 .owner = THIS_MODULE,
559 .id = I2C_HW_SMBUS_I801,
560 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 559 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
561 .algo = &smbus_algorithm, 560 .algo = &smbus_algorithm,
562}; 561};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 651f2f1ae5b7..88f0db73b364 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -746,7 +746,6 @@ static int __devinit iic_probe(struct of_device *ofdev,
746 adap->dev.parent = &ofdev->dev; 746 adap->dev.parent = &ofdev->dev;
747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); 747 strlcpy(adap->name, "IBM IIC", sizeof(adap->name));
748 i2c_set_adapdata(adap, dev); 748 i2c_set_adapdata(adap, dev);
749 adap->id = I2C_HW_OCP;
750 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 749 adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
751 adap->algo = &iic_algo; 750 adap->algo = &iic_algo;
752 adap->timeout = 1; 751 adap->timeout = 1;
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index fc2714ac0c0f..3190690c26ce 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -480,7 +480,6 @@ iop3xx_i2c_probe(struct platform_device *pdev)
480 } 480 }
481 481
482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name)); 482 memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
483 new_adapter->id = I2C_HW_IOP3XX;
484 new_adapter->owner = THIS_MODULE; 483 new_adapter->owner = THIS_MODULE;
485 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 484 new_adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
486 new_adapter->dev.parent = &pdev->dev; 485 new_adapter->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index 05d72e981353..8e8467970481 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -116,7 +116,6 @@ static int ixp2000_i2c_probe(struct platform_device *plat_dev)
116 drv_data->algo_data.udelay = 6; 116 drv_data->algo_data.udelay = 6;
117 drv_data->algo_data.timeout = 100; 117 drv_data->algo_data.timeout = 100;
118 118
119 drv_data->adapter.id = I2C_HW_B_IXP2000,
120 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name, 119 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
121 sizeof(drv_data->adapter.name)); 120 sizeof(drv_data->adapter.name));
122 drv_data->adapter.algo_data = &drv_data->algo_data, 121 drv_data->adapter.algo_data = &drv_data->algo_data,
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a9a45fcc8544..aedbbe6618db 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -310,7 +310,6 @@ static const struct i2c_algorithm mpc_algo = {
310static struct i2c_adapter mpc_ops = { 310static struct i2c_adapter mpc_ops = {
311 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
312 .name = "MPC adapter", 312 .name = "MPC adapter",
313 .id = I2C_HW_MPC107,
314 .algo = &mpc_algo, 313 .algo = &mpc_algo,
315 .timeout = 1, 314 .timeout = 1,
316}; 315};
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 9e8118d2fe64..eeda276f8f16 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -527,7 +527,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
527 goto exit_unmap_regs; 527 goto exit_unmap_regs;
528 } 528 }
529 drv_data->adapter.dev.parent = &pd->dev; 529 drv_data->adapter.dev.parent = &pd->dev;
530 drv_data->adapter.id = I2C_HW_MV64XXX;
531 drv_data->adapter.algo = &mv64xxx_i2c_algo; 530 drv_data->adapter.algo = &mv64xxx_i2c_algo;
532 drv_data->adapter.owner = THIS_MODULE; 531 drv_data->adapter.owner = THIS_MODULE;
533 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 532 drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 3b19bc41a60b..05af6cd7f270 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -355,7 +355,6 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
355 return -EBUSY; 355 return -EBUSY;
356 } 356 }
357 smbus->adapter.owner = THIS_MODULE; 357 smbus->adapter.owner = THIS_MODULE;
358 smbus->adapter.id = I2C_HW_SMBUS_NFORCE2;
359 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 358 smbus->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
360 smbus->adapter.algo = &smbus_algorithm; 359 smbus->adapter.algo = &smbus_algorithm;
361 smbus->adapter.algo_data = smbus; 360 smbus->adapter.algo_data = smbus;
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index b2b8380f6602..322c5691e38e 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -115,7 +115,6 @@ static struct i2c_algo_bit_data parport_algo_data = {
115static struct i2c_adapter parport_adapter = { 115static struct i2c_adapter parport_adapter = {
116 .owner = THIS_MODULE, 116 .owner = THIS_MODULE,
117 .class = I2C_CLASS_HWMON, 117 .class = I2C_CLASS_HWMON,
118 .id = I2C_HW_B_LP,
119 .algo_data = &parport_algo_data, 118 .algo_data = &parport_algo_data,
120 .name = "Parallel port adapter (light)", 119 .name = "Parallel port adapter (light)",
121}; 120};
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a257cd5cd134..0d8998610c74 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -164,7 +164,6 @@ static void i2c_parport_attach (struct parport *port)
164 /* Fill the rest of the structure */ 164 /* Fill the rest of the structure */
165 adapter->adapter.owner = THIS_MODULE; 165 adapter->adapter.owner = THIS_MODULE;
166 adapter->adapter.class = I2C_CLASS_HWMON; 166 adapter->adapter.class = I2C_CLASS_HWMON;
167 adapter->adapter.id = I2C_HW_B_LP;
168 strlcpy(adapter->adapter.name, "Parallel port adapter", 167 strlcpy(adapter->adapter.name, "Parallel port adapter",
169 sizeof(adapter->adapter.name)); 168 sizeof(adapter->adapter.name));
170 adapter->algo_data = parport_algo_data; 169 adapter->algo_data = parport_algo_data;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 9eb76268ec78..4aa8138cb0a9 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -101,7 +101,6 @@ static struct i2c_algo_pca_data pca_isa_data = {
101 101
102static struct i2c_adapter pca_isa_ops = { 102static struct i2c_adapter pca_isa_ops = {
103 .owner = THIS_MODULE, 103 .owner = THIS_MODULE,
104 .id = I2C_HW_A_ISA,
105 .algo_data = &pca_isa_data, 104 .algo_data = &pca_isa_data,
106 .name = "PCA9564 ISA Adapter", 105 .name = "PCA9564 ISA Adapter",
107 .timeout = 100, 106 .timeout = 100,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index eaa9b387543e..761f9dd53620 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -403,7 +403,6 @@ static const struct i2c_algorithm smbus_algorithm = {
403 403
404static struct i2c_adapter piix4_adapter = { 404static struct i2c_adapter piix4_adapter = {
405 .owner = THIS_MODULE, 405 .owner = THIS_MODULE,
406 .id = I2C_HW_SMBUS_PIIX4,
407 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 406 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
408 .algo = &smbus_algorithm, 407 .algo = &smbus_algorithm,
409}; 408};
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 4ddefbf238e9..98b1ec489159 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -155,7 +155,6 @@ static struct i2c_algo_sibyte_data sibyte_board_data[2] = {
155static struct i2c_adapter sibyte_board_adapter[2] = { 155static struct i2c_adapter sibyte_board_adapter[2] = {
156 { 156 {
157 .owner = THIS_MODULE, 157 .owner = THIS_MODULE,
158 .id = I2C_HW_SIBYTE,
159 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 158 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
160 .algo = NULL, 159 .algo = NULL,
161 .algo_data = &sibyte_board_data[0], 160 .algo_data = &sibyte_board_data[0],
@@ -164,7 +163,6 @@ static struct i2c_adapter sibyte_board_adapter[2] = {
164 }, 163 },
165 { 164 {
166 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
167 .id = I2C_HW_SIBYTE,
168 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 166 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
169 .algo = NULL, 167 .algo = NULL,
170 .algo_data = &sibyte_board_data[1], 168 .algo_data = &sibyte_board_data[1],
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 8ce2daff985c..f320ab27da46 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -365,7 +365,6 @@ static const struct i2c_algorithm smbus_algorithm = {
365 365
366static struct i2c_adapter sis5595_adapter = { 366static struct i2c_adapter sis5595_adapter = {
367 .owner = THIS_MODULE, 367 .owner = THIS_MODULE,
368 .id = I2C_HW_SMBUS_SIS5595,
369 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 368 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
370 .algo = &smbus_algorithm, 369 .algo = &smbus_algorithm,
371}; 370};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 9c9c016ff2b5..50c3610e6028 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -464,7 +464,6 @@ static const struct i2c_algorithm smbus_algorithm = {
464 464
465static struct i2c_adapter sis630_adapter = { 465static struct i2c_adapter sis630_adapter = {
466 .owner = THIS_MODULE, 466 .owner = THIS_MODULE,
467 .id = I2C_HW_SMBUS_SIS630,
468 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 467 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
469 .algo = &smbus_algorithm, 468 .algo = &smbus_algorithm,
470}; 469};
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index f1bba6396641..7e1594b40579 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -241,7 +241,6 @@ static const struct i2c_algorithm smbus_algorithm = {
241 241
242static struct i2c_adapter sis96x_adapter = { 242static struct i2c_adapter sis96x_adapter = {
243 .owner = THIS_MODULE, 243 .owner = THIS_MODULE,
244 .id = I2C_HW_SMBUS_SIS96X,
245 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 244 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
246 .algo = &smbus_algorithm, 245 .algo = &smbus_algorithm,
247}; 246};
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 29cef0433f34..8b24f192103a 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -83,7 +83,6 @@ static struct i2c_algo_bit_data bit_data = {
83 83
84static struct i2c_adapter vt586b_adapter = { 84static struct i2c_adapter vt586b_adapter = {
85 .owner = THIS_MODULE, 85 .owner = THIS_MODULE,
86 .id = I2C_HW_B_VIA,
87 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 86 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
88 .name = "VIA i2c", 87 .name = "VIA i2c",
89 .algo_data = &bit_data, 88 .algo_data = &bit_data,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 9f194d9efd91..02e6f724b05f 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -321,7 +321,6 @@ static const struct i2c_algorithm smbus_algorithm = {
321 321
322static struct i2c_adapter vt596_adapter = { 322static struct i2c_adapter vt596_adapter = {
323 .owner = THIS_MODULE, 323 .owner = THIS_MODULE,
324 .id = I2C_HW_SMBUS_VIA2,
325 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 324 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
326 .algo = &smbus_algorithm, 325 .algo = &smbus_algorithm,
327}; 326};
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 1d4ae26ba73d..1a474acc0ddd 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -163,7 +163,6 @@ static struct i2c_algo_bit_data voo_i2c_bit_data = {
163 163
164static struct i2c_adapter voodoo3_i2c_adapter = { 164static struct i2c_adapter voodoo3_i2c_adapter = {
165 .owner = THIS_MODULE, 165 .owner = THIS_MODULE,
166 .id = I2C_HW_B_VOO,
167 .class = I2C_CLASS_TV_ANALOG, 166 .class = I2C_CLASS_TV_ANALOG,
168 .name = "I2C Voodoo3/Banshee adapter", 167 .name = "I2C Voodoo3/Banshee adapter",
169 .algo_data = &voo_i2c_bit_data, 168 .algo_data = &voo_i2c_bit_data,
@@ -180,7 +179,6 @@ static struct i2c_algo_bit_data voo_ddc_bit_data = {
180 179
181static struct i2c_adapter voodoo3_ddc_adapter = { 180static struct i2c_adapter voodoo3_ddc_adapter = {
182 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
183 .id = I2C_HW_B_VOO,
184 .class = I2C_CLASS_DDC, 182 .class = I2C_CLASS_DDC,
185 .name = "DDC Voodoo3/Banshee adapter", 183 .name = "DDC Voodoo3/Banshee adapter",
186 .algo_data = &voo_ddc_bit_data, 184 .algo_data = &voo_ddc_bit_data,
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index ed794b145a11..648ecc6f60e6 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -440,7 +440,6 @@ static __init struct scx200_acb_iface *scx200_create_iface(const char *text,
440 i2c_set_adapdata(adapter, iface); 440 i2c_set_adapdata(adapter, iface);
441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); 441 snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index);
442 adapter->owner = THIS_MODULE; 442 adapter->owner = THIS_MODULE;
443 adapter->id = I2C_HW_SMBUS_SCX200;
444 adapter->algo = &scx200_acb_algorithm; 443 adapter->algo = &scx200_acb_algorithm;
445 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; 444 adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
446 adapter->dev.parent = dev; 445 adapter->dev.parent = dev;
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
index e4c98539c517..162b74a04886 100644
--- a/drivers/i2c/busses/scx200_i2c.c
+++ b/drivers/i2c/busses/scx200_i2c.c
@@ -82,7 +82,6 @@ static struct i2c_algo_bit_data scx200_i2c_data = {
82static struct i2c_adapter scx200_i2c_ops = { 82static struct i2c_adapter scx200_i2c_ops = {
83 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, 84 .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
85 .id = I2C_HW_B_SCX200,
86 .algo_data = &scx200_i2c_data, 85 .algo_data = &scx200_i2c_data,
87 .name = "NatSemi SCx200 I2C", 86 .name = "NatSemi SCx200 I2C",
88}; 87};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index b9bef04b7be4..c80312c1f382 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -16,43 +16,6 @@ config DS1682
16 This driver can also be built as a module. If so, the module 16 This driver can also be built as a module. If so, the module
17 will be called ds1682. 17 will be called ds1682.
18 18
19config AT24
20 tristate "EEPROMs from most vendors"
21 depends on SYSFS && EXPERIMENTAL
22 help
23 Enable this driver to get read/write support to most I2C EEPROMs,
24 after you configure the driver to know about each EEPROM on
25 your target board. Use these generic chip names, instead of
26 vendor-specific ones like at24c64 or 24lc02:
27
28 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
29 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
30
31 Unless you like data loss puzzles, always be sure that any chip
32 you configure as a 24c32 (32 kbit) or larger is NOT really a
33 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
34 as read-only won't help recover from this. Also, if your chip
35 has any software write-protect mechanism you may want to review the
36 code to make sure this driver won't turn it on by accident.
37
38 If you use this with an SMBus adapter instead of an I2C adapter,
39 full functionality is not available. Only smaller devices are
40 supported (24c16 and below, max 4 kByte).
41
42 This driver can also be built as a module. If so, the module
43 will be called at24.
44
45config SENSORS_EEPROM
46 tristate "EEPROM reader"
47 depends on EXPERIMENTAL
48 help
49 If you say yes here you get read-only access to the EEPROM data
50 available on modern memory DIMMs and Sony Vaio laptops. Such
51 EEPROMs could theoretically be available on other devices as well.
52
53 This driver can also be built as a module. If so, the module
54 will be called eeprom.
55
56config SENSORS_PCF8574 19config SENSORS_PCF8574
57 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)" 20 tristate "Philips PCF8574 and PCF8574A (DEPRECATED)"
58 depends on EXPERIMENTAL && GPIO_PCF857X = "n" 21 depends on EXPERIMENTAL && GPIO_PCF857X = "n"
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index 00fcb5193ac2..d142f238a2de 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -11,8 +11,6 @@
11# 11#
12 12
13obj-$(CONFIG_DS1682) += ds1682.o 13obj-$(CONFIG_DS1682) += ds1682.o
14obj-$(CONFIG_AT24) += at24.o
15obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o
16obj-$(CONFIG_SENSORS_MAX6875) += max6875.o 14obj-$(CONFIG_SENSORS_MAX6875) += max6875.o
17obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o 15obj-$(CONFIG_SENSORS_PCA9539) += pca9539.o
18obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o 16obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c
index a5ba820d69bb..a638e952d67a 100644
--- a/drivers/ide/falconide.c
+++ b/drivers/ide/falconide.c
@@ -82,7 +82,7 @@ static const struct ide_tp_ops falconide_tp_ops = {
82 82
83static const struct ide_port_info falconide_port_info = { 83static const struct ide_port_info falconide_port_info = {
84 .tp_ops = &falconide_tp_ops, 84 .tp_ops = &falconide_tp_ops,
85 .host_flags = IDE_HFLAG_NO_DMA, 85 .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_SERIALIZE,
86}; 86};
87 87
88static void __init falconide_setup_ports(hw_regs_t *hw) 88static void __init falconide_setup_ports(hw_regs_t *hw)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 312127ea443a..0db1ed9f5fc2 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -649,7 +649,8 @@ static int ide_register_port(ide_hwif_t *hwif)
649 /* register with global device tree */ 649 /* register with global device tree */
650 dev_set_name(&hwif->gendev, hwif->name); 650 dev_set_name(&hwif->gendev, hwif->name);
651 hwif->gendev.driver_data = hwif; 651 hwif->gendev.driver_data = hwif;
652 hwif->gendev.parent = hwif->dev; 652 if (hwif->gendev.parent == NULL)
653 hwif->gendev.parent = hwif->dev;
653 hwif->gendev.release = hwif_release_dev; 654 hwif->gendev.release = hwif_release_dev;
654 655
655 ret = device_register(&hwif->gendev); 656 ret = device_register(&hwif->gendev);
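
The ide-probe.c hunk switches to a set-only-if-unset assignment so a host driver that has already attached a parent device is not overridden. A minimal sketch of that idiom, with hypothetical names:

#include <linux/device.h>

/* Install the fallback parent only when the caller has not set one yet. */
static void demo_set_default_parent(struct device *child,
				    struct device *fallback)
{
	if (child->parent == NULL)
		child->parent = fallback;
}
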
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index a7ac490c9ae3..f38aac78044c 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -346,7 +346,8 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
346{ 346{
347 struct clk *clk; 347 struct clk *clk;
348 struct resource *mem, *irq; 348 struct resource *mem, *irq;
349 unsigned long base, rate; 349 void __iomem *base;
350 unsigned long rate;
350 int i, rc; 351 int i, rc;
351 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; 352 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
352 353
@@ -382,11 +383,13 @@ static int __init palm_bk3710_probe(struct platform_device *pdev)
382 base = IO_ADDRESS(mem->start); 383 base = IO_ADDRESS(mem->start);
383 384
384 /* Configure the Palm Chip controller */ 385 /* Configure the Palm Chip controller */
385 palm_bk3710_chipinit((void __iomem *)base); 386 palm_bk3710_chipinit(base);
386 387
387 for (i = 0; i < IDE_NR_PORTS - 2; i++) 388 for (i = 0; i < IDE_NR_PORTS - 2; i++)
388 hw.io_ports_array[i] = base + IDE_PALM_ATA_PRI_REG_OFFSET + i; 389 hw.io_ports_array[i] = (unsigned long)
389 hw.io_ports.ctl_addr = base + IDE_PALM_ATA_PRI_CTL_OFFSET; 390 (base + IDE_PALM_ATA_PRI_REG_OFFSET + i);
391 hw.io_ports.ctl_addr = (unsigned long)
392 (base + IDE_PALM_ATA_PRI_CTL_OFFSET);
390 hw.irq = irq->start; 393 hw.irq = irq->start;
391 hw.dev = &pdev->dev; 394 hw.dev = &pdev->dev;
392 hw.chipset = ide_palm3710; 395 hw.chipset = ide_palm3710;
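
The palm_bk3710 hunks keep the mapped register base as a void __iomem * cookie and only cast to unsigned long where the legacy hw_regs_t port array requires a plain integer. A minimal sketch of that boundary cast, with hypothetical names:

#include <linux/io.h>

/* Fill a legacy unsigned-long port array from an __iomem base; the cast
 * happens once, at the edge of the old interface, not throughout the code. */
static void demo_fill_ports(unsigned long *ports, int nports,
			    void __iomem *base, unsigned int offset)
{
	int i;

	for (i = 0; i < nports; i++)
		ports[i] = (unsigned long)(base + offset + i);
}
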
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index dc15cadb06ef..38f712036201 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1419,7 +1419,6 @@ static int __devinit add_card(struct pci_dev *dev,
1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL); 1419 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory"); 1420 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1421 1421
1422 i2c_ad->id = I2C_HW_B_PCILYNX;
1423 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name)); 1422 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1424 i2c_adapter_data = bit_data; 1423 i2c_adapter_data = bit_data;
1425 i2c_ad->algo_data = &i2c_adapter_data; 1424 i2c_ad->algo_data = &i2c_adapter_data;
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 2f4c28a30271..97e4b231cdc4 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -196,7 +196,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
196 196
197 if (h_ret != H_SUCCESS) { 197 if (h_ret != H_SUCCESS) {
198 ehca_err(device, "hipz_h_alloc_resource_cq() failed " 198 ehca_err(device, "hipz_h_alloc_resource_cq() failed "
199 "h_ret=%li device=%p", h_ret, device); 199 "h_ret=%lli device=%p", h_ret, device);
200 cq = ERR_PTR(ehca2ib_return_code(h_ret)); 200 cq = ERR_PTR(ehca2ib_return_code(h_ret));
201 goto create_cq_exit2; 201 goto create_cq_exit2;
202 } 202 }
@@ -232,7 +232,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
232 232
233 if (h_ret < H_SUCCESS) { 233 if (h_ret < H_SUCCESS) {
234 ehca_err(device, "hipz_h_register_rpage_cq() failed " 234 ehca_err(device, "hipz_h_register_rpage_cq() failed "
235 "ehca_cq=%p cq_num=%x h_ret=%li counter=%i " 235 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
236 "act_pages=%i", my_cq, my_cq->cq_number, 236 "act_pages=%i", my_cq, my_cq->cq_number,
237 h_ret, counter, param.act_pages); 237 h_ret, counter, param.act_pages);
238 cq = ERR_PTR(-EINVAL); 238 cq = ERR_PTR(-EINVAL);
@@ -244,7 +244,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
244 if ((h_ret != H_SUCCESS) || vpage) { 244 if ((h_ret != H_SUCCESS) || vpage) {
245 ehca_err(device, "Registration of pages not " 245 ehca_err(device, "Registration of pages not "
246 "complete ehca_cq=%p cq_num=%x " 246 "complete ehca_cq=%p cq_num=%x "
247 "h_ret=%li", my_cq, my_cq->cq_number, 247 "h_ret=%lli", my_cq, my_cq->cq_number,
248 h_ret); 248 h_ret);
249 cq = ERR_PTR(-EAGAIN); 249 cq = ERR_PTR(-EAGAIN);
250 goto create_cq_exit4; 250 goto create_cq_exit4;
@@ -252,7 +252,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
252 } else { 252 } else {
253 if (h_ret != H_PAGE_REGISTERED) { 253 if (h_ret != H_PAGE_REGISTERED) {
254 ehca_err(device, "Registration of page failed " 254 ehca_err(device, "Registration of page failed "
255 "ehca_cq=%p cq_num=%x h_ret=%li " 255 "ehca_cq=%p cq_num=%x h_ret=%lli "
256 "counter=%i act_pages=%i", 256 "counter=%i act_pages=%i",
257 my_cq, my_cq->cq_number, 257 my_cq, my_cq->cq_number,
258 h_ret, counter, param.act_pages); 258 h_ret, counter, param.act_pages);
@@ -266,7 +266,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
266 266
267 gal = my_cq->galpas.kernel; 267 gal = my_cq->galpas.kernel;
268 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); 268 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
269 ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx", 269 ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
270 my_cq, my_cq->cq_number, cqx_fec); 270 my_cq, my_cq->cq_number, cqx_fec);
271 271
272 my_cq->ib_cq.cqe = my_cq->nr_of_entries = 272 my_cq->ib_cq.cqe = my_cq->nr_of_entries =
@@ -307,7 +307,7 @@ create_cq_exit3:
307 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); 307 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
308 if (h_ret != H_SUCCESS) 308 if (h_ret != H_SUCCESS)
309 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " 309 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
310 "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret); 310 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
311 311
312create_cq_exit2: 312create_cq_exit2:
313 write_lock_irqsave(&ehca_cq_idr_lock, flags); 313 write_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -355,7 +355,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
355 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 355 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
356 if (h_ret == H_R_STATE) { 356 if (h_ret == H_R_STATE) {
357 /* cq in err: read err data and destroy it forcibly */ 357 /* cq in err: read err data and destroy it forcibly */
358 ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err " 358 ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
359 "state. Try to delete it forcibly.", 359 "state. Try to delete it forcibly.",
360 my_cq, cq_num, my_cq->ipz_cq_handle.handle); 360 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
361 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); 361 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
@@ -365,7 +365,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
365 cq_num); 365 cq_num);
366 } 366 }
367 if (h_ret != H_SUCCESS) { 367 if (h_ret != H_SUCCESS) {
368 ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li " 368 ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
369 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); 369 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
370 return ehca2ib_return_code(h_ret); 370 return ehca2ib_return_code(h_ret);
371 } 371 }
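
The %lx→%llx and %li→%lli conversions above, and in the ehca files that follow, all apply the same rule: the handles and return codes being printed are u64/s64 values, which are unsigned/signed long long in this tree, so printk needs the ll length modifier to match. A minimal sketch with hypothetical names:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_print_u64(u64 handle, s64 h_ret)
{
	/* u64/s64 are unsigned/signed long long here, so %llx / %lli match. */
	printk(KERN_DEBUG "handle=%llx h_ret=%lli\n", handle, h_ret);

	/* When a value's type may vary, cast explicitly to keep one format. */
	printk(KERN_DEBUG "handle=%llx\n", (unsigned long long)handle);
}
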
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 46288220cfbb..9209c5332dfe 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -393,7 +393,7 @@ int ehca_modify_port(struct ib_device *ibdev,
393 hret = hipz_h_modify_port(shca->ipz_hca_handle, port, 393 hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
394 cap, props->init_type, port_modify_mask); 394 cap, props->init_type, port_modify_mask);
395 if (hret != H_SUCCESS) { 395 if (hret != H_SUCCESS) {
396 ehca_err(&shca->ib_device, "Modify port failed h_ret=%li", 396 ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
397 hret); 397 hret);
398 ret = -EINVAL; 398 ret = -EINVAL;
399 } 399 }
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3128a5090dbd..99bcbd7ffb0a 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -99,7 +99,7 @@ static void print_error_data(struct ehca_shca *shca, void *data,
99 return; 99 return;
100 100
101 ehca_err(&shca->ib_device, 101 ehca_err(&shca->ib_device,
102 "QP 0x%x (resource=%lx) has errors.", 102 "QP 0x%x (resource=%llx) has errors.",
103 qp->ib_qp.qp_num, resource); 103 qp->ib_qp.qp_num, resource);
104 break; 104 break;
105 } 105 }
@@ -108,21 +108,21 @@ static void print_error_data(struct ehca_shca *shca, void *data,
108 struct ehca_cq *cq = (struct ehca_cq *)data; 108 struct ehca_cq *cq = (struct ehca_cq *)data;
109 109
110 ehca_err(&shca->ib_device, 110 ehca_err(&shca->ib_device,
111 "CQ 0x%x (resource=%lx) has errors.", 111 "CQ 0x%x (resource=%llx) has errors.",
112 cq->cq_number, resource); 112 cq->cq_number, resource);
113 break; 113 break;
114 } 114 }
115 default: 115 default:
116 ehca_err(&shca->ib_device, 116 ehca_err(&shca->ib_device,
117 "Unknown error type: %lx on %s.", 117 "Unknown error type: %llx on %s.",
118 type, shca->ib_device.name); 118 type, shca->ib_device.name);
119 break; 119 break;
120 } 120 }
121 121
122 ehca_err(&shca->ib_device, "Error data is available: %lx.", resource); 122 ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
123 ehca_err(&shca->ib_device, "EHCA ----- error data begin " 123 ehca_err(&shca->ib_device, "EHCA ----- error data begin "
124 "---------------------------------------------------"); 124 "---------------------------------------------------");
125 ehca_dmp(rblock, length, "resource=%lx", resource); 125 ehca_dmp(rblock, length, "resource=%llx", resource);
126 ehca_err(&shca->ib_device, "EHCA ----- error data end " 126 ehca_err(&shca->ib_device, "EHCA ----- error data end "
127 "----------------------------------------------------"); 127 "----------------------------------------------------");
128 128
@@ -152,7 +152,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
152 152
153 if (ret == H_R_STATE) 153 if (ret == H_R_STATE)
154 ehca_err(&shca->ib_device, 154 ehca_err(&shca->ib_device,
155 "No error data is available: %lx.", resource); 155 "No error data is available: %llx.", resource);
156 else if (ret == H_SUCCESS) { 156 else if (ret == H_SUCCESS) {
157 int length; 157 int length;
158 158
@@ -164,7 +164,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
164 print_error_data(shca, data, rblock, length); 164 print_error_data(shca, data, rblock, length);
165 } else 165 } else
166 ehca_err(&shca->ib_device, 166 ehca_err(&shca->ib_device,
167 "Error data could not be fetched: %lx", resource); 167 "Error data could not be fetched: %llx", resource);
168 168
169 ehca_free_fw_ctrlblock(rblock); 169 ehca_free_fw_ctrlblock(rblock);
170 170
@@ -514,7 +514,7 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
514 struct ehca_cq *cq; 514 struct ehca_cq *cq;
515 515
516 eqe_value = eqe->entry; 516 eqe_value = eqe->entry;
517 ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value); 517 ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
518 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) { 518 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
519 ehca_dbg(&shca->ib_device, "Got completion event"); 519 ehca_dbg(&shca->ib_device, "Got completion event");
520 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value); 520 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
@@ -603,7 +603,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
603 ret = hipz_h_eoi(eq->ist); 603 ret = hipz_h_eoi(eq->ist);
604 if (ret != H_SUCCESS) 604 if (ret != H_SUCCESS)
605 ehca_err(&shca->ib_device, 605 ehca_err(&shca->ib_device,
606 "bad return code EOI -rc = %ld\n", ret); 606 "bad return code EOI -rc = %lld\n", ret);
607 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt); 607 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
608 } 608 }
609 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE)) 609 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c7b8a506af65..368311ce332b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -304,7 +304,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
304 304
305 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); 305 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
306 if (h_ret != H_SUCCESS) { 306 if (h_ret != H_SUCCESS) {
307 ehca_gen_err("Cannot query device properties. h_ret=%li", 307 ehca_gen_err("Cannot query device properties. h_ret=%lli",
308 h_ret); 308 h_ret);
309 ret = -EPERM; 309 ret = -EPERM;
310 goto sense_attributes1; 310 goto sense_attributes1;
@@ -391,7 +391,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
391 port = (struct hipz_query_port *)rblock; 391 port = (struct hipz_query_port *)rblock;
392 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 392 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
393 if (h_ret != H_SUCCESS) { 393 if (h_ret != H_SUCCESS) {
394 ehca_gen_err("Cannot query port properties. h_ret=%li", 394 ehca_gen_err("Cannot query port properties. h_ret=%lli",
395 h_ret); 395 h_ret);
396 ret = -EPERM; 396 ret = -EPERM;
397 goto sense_attributes1; 397 goto sense_attributes1;
@@ -682,7 +682,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
682{ 682{
683 struct ehca_shca *shca = dev->driver_data; 683 struct ehca_shca *shca = dev->driver_data;
684 684
685 return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle); 685 return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
686 686
687} 687}
688static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); 688static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index e3ef0264ccc6..120aedf9f989 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -88,7 +88,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
88 if (h_ret != H_SUCCESS) 88 if (h_ret != H_SUCCESS)
89 ehca_err(ibqp->device, 89 ehca_err(ibqp->device,
90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " 90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
91 "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 91 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
92 92
93 return ehca2ib_return_code(h_ret); 93 return ehca2ib_return_code(h_ret);
94} 94}
@@ -125,7 +125,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
125 if (h_ret != H_SUCCESS) 125 if (h_ret != H_SUCCESS)
126 ehca_err(ibqp->device, 126 ehca_err(ibqp->device,
127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " 127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
128 "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 128 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
129 129
130 return ehca2ib_return_code(h_ret); 130 return ehca2ib_return_code(h_ret);
131} 131}
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f974367cad40..72f83f7df614 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -204,7 +204,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
204 } 204 }
205 if ((size == 0) || 205 if ((size == 0) ||
206 (((u64)iova_start + size) < (u64)iova_start)) { 206 (((u64)iova_start + size) < (u64)iova_start)) {
207 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p", 207 ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
208 size, iova_start); 208 size, iova_start);
209 ib_mr = ERR_PTR(-EINVAL); 209 ib_mr = ERR_PTR(-EINVAL);
210 goto reg_phys_mr_exit0; 210 goto reg_phys_mr_exit0;
@@ -309,8 +309,8 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
309 } 309 }
310 310
311 if (length == 0 || virt + length < virt) { 311 if (length == 0 || virt + length < virt) {
312 ehca_err(pd->device, "bad input values: length=%lx " 312 ehca_err(pd->device, "bad input values: length=%llx "
313 "virt_base=%lx", length, virt); 313 "virt_base=%llx", length, virt);
314 ib_mr = ERR_PTR(-EINVAL); 314 ib_mr = ERR_PTR(-EINVAL);
315 goto reg_user_mr_exit0; 315 goto reg_user_mr_exit0;
316 } 316 }
@@ -373,7 +373,7 @@ reg_user_mr_fallback:
373 &e_mr->ib.ib_mr.rkey); 373 &e_mr->ib.ib_mr.rkey);
374 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { 374 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
375 ehca_warn(pd->device, "failed to register mr " 375 ehca_warn(pd->device, "failed to register mr "
376 "with hwpage_size=%lx", hwpage_size); 376 "with hwpage_size=%llx", hwpage_size);
377 ehca_info(pd->device, "try to register mr with " 377 ehca_info(pd->device, "try to register mr with "
378 "kpage_size=%lx", PAGE_SIZE); 378 "kpage_size=%lx", PAGE_SIZE);
379 /* 379 /*
@@ -509,7 +509,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
509 goto rereg_phys_mr_exit1; 509 goto rereg_phys_mr_exit1;
510 if ((new_size == 0) || 510 if ((new_size == 0) ||
511 (((u64)iova_start + new_size) < (u64)iova_start)) { 511 (((u64)iova_start + new_size) < (u64)iova_start)) {
512 ehca_err(mr->device, "bad input values: new_size=%lx " 512 ehca_err(mr->device, "bad input values: new_size=%llx "
513 "iova_start=%p", new_size, iova_start); 513 "iova_start=%p", new_size, iova_start);
514 ret = -EINVAL; 514 ret = -EINVAL;
515 goto rereg_phys_mr_exit1; 515 goto rereg_phys_mr_exit1;
@@ -580,8 +580,8 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
580 580
581 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); 581 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
582 if (h_ret != H_SUCCESS) { 582 if (h_ret != H_SUCCESS) {
583 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p " 583 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
584 "hca_hndl=%lx mr_hndl=%lx lkey=%x", 584 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
585 h_ret, mr, shca->ipz_hca_handle.handle, 585 h_ret, mr, shca->ipz_hca_handle.handle,
586 e_mr->ipz_mr_handle.handle, mr->lkey); 586 e_mr->ipz_mr_handle.handle, mr->lkey);
587 ret = ehca2ib_return_code(h_ret); 587 ret = ehca2ib_return_code(h_ret);
@@ -630,8 +630,8 @@ int ehca_dereg_mr(struct ib_mr *mr)
630 /* TODO: BUSY: MR still has bound window(s) */ 630 /* TODO: BUSY: MR still has bound window(s) */
631 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 631 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
632 if (h_ret != H_SUCCESS) { 632 if (h_ret != H_SUCCESS) {
633 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p " 633 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
634 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", 634 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
635 h_ret, shca, e_mr, shca->ipz_hca_handle.handle, 635 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
636 e_mr->ipz_mr_handle.handle, mr->lkey); 636 e_mr->ipz_mr_handle.handle, mr->lkey);
637 ret = ehca2ib_return_code(h_ret); 637 ret = ehca2ib_return_code(h_ret);
@@ -671,8 +671,8 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
671 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, 671 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
672 e_pd->fw_pd, &hipzout); 672 e_pd->fw_pd, &hipzout);
673 if (h_ret != H_SUCCESS) { 673 if (h_ret != H_SUCCESS) {
674 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li " 674 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
675 "shca=%p hca_hndl=%lx mw=%p", 675 "shca=%p hca_hndl=%llx mw=%p",
676 h_ret, shca, shca->ipz_hca_handle.handle, e_mw); 676 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
677 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); 677 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
678 goto alloc_mw_exit1; 678 goto alloc_mw_exit1;
@@ -713,8 +713,8 @@ int ehca_dealloc_mw(struct ib_mw *mw)
713 713
714 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); 714 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
715 if (h_ret != H_SUCCESS) { 715 if (h_ret != H_SUCCESS) {
716 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p " 716 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
717 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", 717 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
718 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, 718 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
719 e_mw->ipz_mw_handle.handle); 719 e_mw->ipz_mw_handle.handle);
720 return ehca2ib_return_code(h_ret); 720 return ehca2ib_return_code(h_ret);
@@ -840,7 +840,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
840 goto map_phys_fmr_exit0; 840 goto map_phys_fmr_exit0;
841 if (iova % e_fmr->fmr_page_size) { 841 if (iova % e_fmr->fmr_page_size) {
842 /* only whole-numbered pages */ 842 /* only whole-numbered pages */
843 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x", 843 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
844 iova, e_fmr->fmr_page_size); 844 iova, e_fmr->fmr_page_size);
845 ret = -EINVAL; 845 ret = -EINVAL;
846 goto map_phys_fmr_exit0; 846 goto map_phys_fmr_exit0;
@@ -878,7 +878,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
878map_phys_fmr_exit0: 878map_phys_fmr_exit0:
879 if (ret) 879 if (ret)
880 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " 880 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
881 "iova=%lx", ret, fmr, page_list, list_len, iova); 881 "iova=%llx", ret, fmr, page_list, list_len, iova);
882 return ret; 882 return ret;
883} /* end ehca_map_phys_fmr() */ 883} /* end ehca_map_phys_fmr() */
884 884
@@ -964,8 +964,8 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
964 964
965 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 965 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
966 if (h_ret != H_SUCCESS) { 966 if (h_ret != H_SUCCESS) {
967 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p " 967 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
968 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", 968 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
969 h_ret, e_fmr, shca->ipz_hca_handle.handle, 969 h_ret, e_fmr, shca->ipz_hca_handle.handle,
970 e_fmr->ipz_mr_handle.handle, fmr->lkey); 970 e_fmr->ipz_mr_handle.handle, fmr->lkey);
971 ret = ehca2ib_return_code(h_ret); 971 ret = ehca2ib_return_code(h_ret);
@@ -1007,8 +1007,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
1007 (u64)iova_start, size, hipz_acl, 1007 (u64)iova_start, size, hipz_acl,
1008 e_pd->fw_pd, &hipzout); 1008 e_pd->fw_pd, &hipzout);
1009 if (h_ret != H_SUCCESS) { 1009 if (h_ret != H_SUCCESS) {
1010 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li " 1010 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
1011 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); 1011 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
1012 ret = ehca2ib_return_code(h_ret); 1012 ret = ehca2ib_return_code(h_ret);
1013 goto ehca_reg_mr_exit0; 1013 goto ehca_reg_mr_exit0;
1014 } 1014 }
@@ -1033,9 +1033,9 @@ int ehca_reg_mr(struct ehca_shca *shca,
1033ehca_reg_mr_exit1: 1033ehca_reg_mr_exit1:
1034 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1034 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1035 if (h_ret != H_SUCCESS) { 1035 if (h_ret != H_SUCCESS) {
1036 ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p " 1036 ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
1037 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " 1037 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
1038 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i", 1038 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
1039 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1039 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
1040 hipzout.lkey, pginfo, pginfo->num_kpages, 1040 hipzout.lkey, pginfo, pginfo->num_kpages,
1041 pginfo->num_hwpages, ret); 1041 pginfo->num_hwpages, ret);
@@ -1045,8 +1045,8 @@ ehca_reg_mr_exit1:
1045ehca_reg_mr_exit0: 1045ehca_reg_mr_exit0:
1046 if (ret) 1046 if (ret)
1047 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1047 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1048 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1048 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1049 "num_kpages=%lx num_hwpages=%lx", 1049 "num_kpages=%llx num_hwpages=%llx",
1050 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1050 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1051 pginfo->num_kpages, pginfo->num_hwpages); 1051 pginfo->num_kpages, pginfo->num_hwpages);
1052 return ret; 1052 return ret;
@@ -1116,8 +1116,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1116 */ 1116 */
1117 if (h_ret != H_SUCCESS) { 1117 if (h_ret != H_SUCCESS) {
1118 ehca_err(&shca->ib_device, "last " 1118 ehca_err(&shca->ib_device, "last "
1119 "hipz_reg_rpage_mr failed, h_ret=%li " 1119 "hipz_reg_rpage_mr failed, h_ret=%lli "
1120 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" 1120 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
1121 " lkey=%x", h_ret, e_mr, i, 1121 " lkey=%x", h_ret, e_mr, i,
1122 shca->ipz_hca_handle.handle, 1122 shca->ipz_hca_handle.handle,
1123 e_mr->ipz_mr_handle.handle, 1123 e_mr->ipz_mr_handle.handle,
@@ -1128,8 +1128,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1128 ret = 0; 1128 ret = 0;
1129 } else if (h_ret != H_PAGE_REGISTERED) { 1129 } else if (h_ret != H_PAGE_REGISTERED) {
1130 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " 1130 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1131 "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx " 1131 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
1132 "mr_hndl=%lx", h_ret, e_mr, i, 1132 "mr_hndl=%llx", h_ret, e_mr, i,
1133 e_mr->ib.ib_mr.lkey, 1133 e_mr->ib.ib_mr.lkey,
1134 shca->ipz_hca_handle.handle, 1134 shca->ipz_hca_handle.handle,
1135 e_mr->ipz_mr_handle.handle); 1135 e_mr->ipz_mr_handle.handle);
@@ -1145,7 +1145,7 @@ ehca_reg_mr_rpages_exit1:
1145ehca_reg_mr_rpages_exit0: 1145ehca_reg_mr_rpages_exit0:
1146 if (ret) 1146 if (ret)
1147 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " 1147 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
1148 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, 1148 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
1149 pginfo, pginfo->num_kpages, pginfo->num_hwpages); 1149 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
1150 return ret; 1150 return ret;
1151} /* end ehca_reg_mr_rpages() */ 1151} /* end ehca_reg_mr_rpages() */
@@ -1184,7 +1184,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1184 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); 1184 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
1185 if (ret) { 1185 if (ret) {
1186 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1186 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1187 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " 1187 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
1188 "kpage=%p", e_mr, pginfo, pginfo->type, 1188 "kpage=%p", e_mr, pginfo, pginfo->type,
1189 pginfo->num_kpages, pginfo->num_hwpages, kpage); 1189 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1190 goto ehca_rereg_mr_rereg1_exit1; 1190 goto ehca_rereg_mr_rereg1_exit1;
@@ -1205,13 +1205,13 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1205 * (MW bound or MR is shared) 1205 * (MW bound or MR is shared)
1206 */ 1206 */
1207 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " 1207 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
1208 "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr); 1208 "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
1209 *pginfo = pginfo_save; 1209 *pginfo = pginfo_save;
1210 ret = -EAGAIN; 1210 ret = -EAGAIN;
1211 } else if ((u64 *)hipzout.vaddr != iova_start) { 1211 } else if ((u64 *)hipzout.vaddr != iova_start) {
1212 ehca_err(&shca->ib_device, "PHYP changed iova_start in " 1212 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
1213 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " 1213 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
1214 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, 1214 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
1215 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, 1215 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
1216 e_mr->ib.ib_mr.lkey, hipzout.lkey); 1216 e_mr->ib.ib_mr.lkey, hipzout.lkey);
1217 ret = -EFAULT; 1217 ret = -EFAULT;
@@ -1235,7 +1235,7 @@ ehca_rereg_mr_rereg1_exit1:
1235ehca_rereg_mr_rereg1_exit0: 1235ehca_rereg_mr_rereg1_exit0:
1236 if ( ret && (ret != -EAGAIN) ) 1236 if ( ret && (ret != -EAGAIN) )
1237 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " 1237 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
1238 "pginfo=%p num_kpages=%lx num_hwpages=%lx", 1238 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
1239 ret, *lkey, *rkey, pginfo, pginfo->num_kpages, 1239 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
1240 pginfo->num_hwpages); 1240 pginfo->num_hwpages);
1241 return ret; 1241 return ret;
@@ -1263,7 +1263,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
1263 (e_mr->num_hwpages > MAX_RPAGES) || 1263 (e_mr->num_hwpages > MAX_RPAGES) ||
1264 (pginfo->num_hwpages > e_mr->num_hwpages)) { 1264 (pginfo->num_hwpages > e_mr->num_hwpages)) {
1265 ehca_dbg(&shca->ib_device, "Rereg3 case, " 1265 ehca_dbg(&shca->ib_device, "Rereg3 case, "
1266 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x", 1266 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
1267 pginfo->num_hwpages, e_mr->num_hwpages); 1267 pginfo->num_hwpages, e_mr->num_hwpages);
1268 rereg_1_hcall = 0; 1268 rereg_1_hcall = 0;
1269 rereg_3_hcall = 1; 1269 rereg_3_hcall = 1;
@@ -1295,7 +1295,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
1295 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1295 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1296 if (h_ret != H_SUCCESS) { 1296 if (h_ret != H_SUCCESS) {
1297 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1297 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1298 "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx " 1298 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
1299 "mr->lkey=%x", 1299 "mr->lkey=%x",
1300 h_ret, e_mr, shca->ipz_hca_handle.handle, 1300 h_ret, e_mr, shca->ipz_hca_handle.handle,
1301 e_mr->ipz_mr_handle.handle, 1301 e_mr->ipz_mr_handle.handle,
@@ -1328,8 +1328,8 @@ int ehca_rereg_mr(struct ehca_shca *shca,
1328ehca_rereg_mr_exit0: 1328ehca_rereg_mr_exit0:
1329 if (ret) 1329 if (ret)
1330 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1330 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1331 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1331 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1332 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1332 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
1333 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1333 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1334 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, 1334 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1335 rereg_1_hcall, rereg_3_hcall); 1335 rereg_1_hcall, rereg_3_hcall);
@@ -1371,8 +1371,8 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
1371 * FMRs are not shared and no MW bound to FMRs 1371 * FMRs are not shared and no MW bound to FMRs
1372 */ 1372 */
1373 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " 1373 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1374 "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx " 1374 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
1375 "mr_hndl=%lx lkey=%x lkey_out=%x", 1375 "mr_hndl=%llx lkey=%x lkey_out=%x",
1376 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1376 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1377 e_fmr->ipz_mr_handle.handle, 1377 e_fmr->ipz_mr_handle.handle,
1378 e_fmr->ib.ib_fmr.lkey, hipzout.lkey); 1378 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
@@ -1383,7 +1383,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
1383 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 1383 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1384 if (h_ret != H_SUCCESS) { 1384 if (h_ret != H_SUCCESS) {
1385 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1385 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1386 "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx " 1386 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
1387 "lkey=%x", 1387 "lkey=%x",
1388 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1388 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1389 e_fmr->ipz_mr_handle.handle, 1389 e_fmr->ipz_mr_handle.handle,
@@ -1447,9 +1447,9 @@ int ehca_reg_smr(struct ehca_shca *shca,
1447 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1447 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1448 &hipzout); 1448 &hipzout);
1449 if (h_ret != H_SUCCESS) { 1449 if (h_ret != H_SUCCESS) {
1450 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1450 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1451 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " 1451 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1452 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1452 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1453 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, 1453 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1454 shca->ipz_hca_handle.handle, 1454 shca->ipz_hca_handle.handle,
1455 e_origmr->ipz_mr_handle.handle, 1455 e_origmr->ipz_mr_handle.handle,
@@ -1527,7 +1527,7 @@ int ehca_reg_internal_maxmr(
1527 &e_mr->ib.ib_mr.rkey); 1527 &e_mr->ib.ib_mr.rkey);
1528 if (ret) { 1528 if (ret) {
1529 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1529 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1530 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x " 1530 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
1531 "num_hwpages=%x", e_mr, iova_start, size_maxmr, 1531 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1532 num_kpages, num_hwpages); 1532 num_kpages, num_hwpages);
1533 goto ehca_reg_internal_maxmr_exit1; 1533 goto ehca_reg_internal_maxmr_exit1;
@@ -1573,8 +1573,8 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
1573 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1573 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1574 &hipzout); 1574 &hipzout);
1575 if (h_ret != H_SUCCESS) { 1575 if (h_ret != H_SUCCESS) {
1576 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1576 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1577 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1577 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1578 h_ret, e_origmr, shca->ipz_hca_handle.handle, 1578 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1579 e_origmr->ipz_mr_handle.handle, 1579 e_origmr->ipz_mr_handle.handle,
1580 e_origmr->ib.ib_mr.lkey); 1580 e_origmr->ib.ib_mr.lkey);
@@ -1651,28 +1651,28 @@ int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1651 /* check first buffer */ 1651 /* check first buffer */
1652 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { 1652 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1653 ehca_gen_err("iova_start/addr mismatch, iova_start=%p " 1653 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1654 "pbuf->addr=%lx pbuf->size=%lx", 1654 "pbuf->addr=%llx pbuf->size=%llx",
1655 iova_start, pbuf->addr, pbuf->size); 1655 iova_start, pbuf->addr, pbuf->size);
1656 return -EINVAL; 1656 return -EINVAL;
1657 } 1657 }
1658 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && 1658 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1659 (num_phys_buf > 1)) { 1659 (num_phys_buf > 1)) {
1660 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx " 1660 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
1661 "pbuf->size=%lx", pbuf->addr, pbuf->size); 1661 "pbuf->size=%llx", pbuf->addr, pbuf->size);
1662 return -EINVAL; 1662 return -EINVAL;
1663 } 1663 }
1664 1664
1665 for (i = 0; i < num_phys_buf; i++) { 1665 for (i = 0; i < num_phys_buf; i++) {
1666 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { 1666 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1667 ehca_gen_err("bad address, i=%x pbuf->addr=%lx " 1667 ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
1668 "pbuf->size=%lx", 1668 "pbuf->size=%llx",
1669 i, pbuf->addr, pbuf->size); 1669 i, pbuf->addr, pbuf->size);
1670 return -EINVAL; 1670 return -EINVAL;
1671 } 1671 }
1672 if (((i > 0) && /* not 1st */ 1672 if (((i > 0) && /* not 1st */
1673 (i < (num_phys_buf - 1)) && /* not last */ 1673 (i < (num_phys_buf - 1)) && /* not last */
1674 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { 1674 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1675 ehca_gen_err("bad size, i=%x pbuf->size=%lx", 1675 ehca_gen_err("bad size, i=%x pbuf->size=%llx",
1676 i, pbuf->size); 1676 i, pbuf->size);
1677 return -EINVAL; 1677 return -EINVAL;
1678 } 1678 }
@@ -1705,7 +1705,7 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1705 page = page_list; 1705 page = page_list;
1706 for (i = 0; i < list_len; i++) { 1706 for (i = 0; i < list_len; i++) {
1707 if (*page % e_fmr->fmr_page_size) { 1707 if (*page % e_fmr->fmr_page_size) {
1708 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p " 1708 ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
1709 "fmr_page_size=%x", i, *page, page, e_fmr, 1709 "fmr_page_size=%x", i, *page, page, e_fmr,
1710 e_fmr->fmr_page_size); 1710 e_fmr->fmr_page_size);
1711 return -EINVAL; 1711 return -EINVAL;
@@ -1743,9 +1743,9 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1743 (pginfo->next_hwpage * 1743 (pginfo->next_hwpage *
1744 pginfo->hwpage_size)); 1744 pginfo->hwpage_size));
1745 if ( !(*kpage) ) { 1745 if ( !(*kpage) ) {
1746 ehca_gen_err("pgaddr=%lx " 1746 ehca_gen_err("pgaddr=%llx "
1747 "chunk->page_list[i]=%lx " 1747 "chunk->page_list[i]=%llx "
1748 "i=%x next_hwpage=%lx", 1748 "i=%x next_hwpage=%llx",
1749 pgaddr, (u64)sg_dma_address( 1749 pgaddr, (u64)sg_dma_address(
1750 &chunk->page_list[i]), 1750 &chunk->page_list[i]),
1751 i, pginfo->next_hwpage); 1751 i, pginfo->next_hwpage);
@@ -1795,11 +1795,11 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1795 for (t = start_idx; t <= end_idx; t++) { 1795 for (t = start_idx; t <= end_idx; t++) {
1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
1797 if (ehca_debug_level >= 3) 1797 if (ehca_debug_level >= 3)
1798 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1798 ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1801 ehca_gen_err("uncontiguous page found pgaddr=%lx " 1801 ehca_gen_err("uncontiguous page found pgaddr=%llx "
1802 "prev_pgaddr=%lx page_list_i=%x", 1802 "prev_pgaddr=%llx page_list_i=%x",
1803 pgaddr, *prev_pgaddr, t); 1803 pgaddr, *prev_pgaddr, t);
1804 return -EINVAL; 1804 return -EINVAL;
1805 } 1805 }
@@ -1833,7 +1833,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1833 << PAGE_SHIFT ); 1833 << PAGE_SHIFT );
1834 *kpage = phys_to_abs(pgaddr); 1834 *kpage = phys_to_abs(pgaddr);
1835 if ( !(*kpage) ) { 1835 if ( !(*kpage) ) {
1836 ehca_gen_err("pgaddr=%lx i=%x", 1836 ehca_gen_err("pgaddr=%llx i=%x",
1837 pgaddr, i); 1837 pgaddr, i);
1838 ret = -EFAULT; 1838 ret = -EFAULT;
1839 return ret; 1839 return ret;
@@ -1846,8 +1846,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1846 if (pginfo->hwpage_cnt) { 1846 if (pginfo->hwpage_cnt) {
1847 ehca_gen_err( 1847 ehca_gen_err(
1848 "invalid alignment " 1848 "invalid alignment "
1849 "pgaddr=%lx i=%x " 1849 "pgaddr=%llx i=%x "
1850 "mr_pgsize=%lx", 1850 "mr_pgsize=%llx",
1851 pgaddr, i, 1851 pgaddr, i,
1852 pginfo->hwpage_size); 1852 pginfo->hwpage_size);
1853 ret = -EFAULT; 1853 ret = -EFAULT;
@@ -1866,8 +1866,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1866 if (ehca_debug_level >= 3) { 1866 if (ehca_debug_level >= 3) {
1867 u64 val = *(u64 *)abs_to_virt( 1867 u64 val = *(u64 *)abs_to_virt(
1868 phys_to_abs(pgaddr)); 1868 phys_to_abs(pgaddr));
1869 ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1869 ehca_gen_dbg("kpage=%llx chunk_page=%llx "
1870 "value=%016lx", 1870 "value=%016llx",
1871 *kpage, pgaddr, val); 1871 *kpage, pgaddr, val);
1872 } 1872 }
1873 prev_pgaddr = pgaddr; 1873 prev_pgaddr = pgaddr;
@@ -1944,9 +1944,9 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1944 if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1944 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1945 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1945 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1946 ehca_gen_err("kpage_cnt >= num_kpages, " 1946 ehca_gen_err("kpage_cnt >= num_kpages, "
1947 "kpage_cnt=%lx num_kpages=%lx " 1947 "kpage_cnt=%llx num_kpages=%llx "
1948 "hwpage_cnt=%lx " 1948 "hwpage_cnt=%llx "
1949 "num_hwpages=%lx i=%x", 1949 "num_hwpages=%llx i=%x",
1950 pginfo->kpage_cnt, 1950 pginfo->kpage_cnt,
1951 pginfo->num_kpages, 1951 pginfo->num_kpages,
1952 pginfo->hwpage_cnt, 1952 pginfo->hwpage_cnt,
@@ -1957,8 +1957,8 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1957 (pbuf->addr & ~(pginfo->hwpage_size - 1)) + 1957 (pbuf->addr & ~(pginfo->hwpage_size - 1)) +
1958 (pginfo->next_hwpage * pginfo->hwpage_size)); 1958 (pginfo->next_hwpage * pginfo->hwpage_size));
1959 if ( !(*kpage) && pbuf->addr ) { 1959 if ( !(*kpage) && pbuf->addr ) {
1960 ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx " 1960 ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
1961 "next_hwpage=%lx", pbuf->addr, 1961 "next_hwpage=%llx", pbuf->addr,
1962 pbuf->size, pginfo->next_hwpage); 1962 pbuf->size, pginfo->next_hwpage);
1963 return -EFAULT; 1963 return -EFAULT;
1964 } 1964 }
@@ -1996,8 +1996,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1996 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + 1996 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
1997 pginfo->next_hwpage * pginfo->hwpage_size); 1997 pginfo->next_hwpage * pginfo->hwpage_size);
1998 if ( !(*kpage) ) { 1998 if ( !(*kpage) ) {
1999 ehca_gen_err("*fmrlist=%lx fmrlist=%p " 1999 ehca_gen_err("*fmrlist=%llx fmrlist=%p "
2000 "next_listelem=%lx next_hwpage=%lx", 2000 "next_listelem=%llx next_hwpage=%llx",
2001 *fmrlist, fmrlist, 2001 *fmrlist, fmrlist,
2002 pginfo->u.fmr.next_listelem, 2002 pginfo->u.fmr.next_listelem,
2003 pginfo->next_hwpage); 2003 pginfo->next_hwpage);
@@ -2025,7 +2025,7 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
2025 ~(pginfo->hwpage_size - 1)); 2025 ~(pginfo->hwpage_size - 1));
2026 if (prev + pginfo->u.fmr.fmr_pgsize != p) { 2026 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
2027 ehca_gen_err("uncontiguous fmr pages " 2027 ehca_gen_err("uncontiguous fmr pages "
2028 "found prev=%lx p=%lx " 2028 "found prev=%llx p=%llx "
2029 "idx=%x", prev, p, i + j); 2029 "idx=%x", prev, p, i + j);
2030 return -EINVAL; 2030 return -EINVAL;
2031 } 2031 }
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index f161cf173dbe..00c108159714 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -331,7 +331,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
331 if (cnt == (nr_q_pages - 1)) { /* last page! */ 331 if (cnt == (nr_q_pages - 1)) { /* last page! */
332 if (h_ret != expected_hret) { 332 if (h_ret != expected_hret) {
333 ehca_err(ib_dev, "hipz_qp_register_rpage() " 333 ehca_err(ib_dev, "hipz_qp_register_rpage() "
334 "h_ret=%li", h_ret); 334 "h_ret=%lli", h_ret);
335 ret = ehca2ib_return_code(h_ret); 335 ret = ehca2ib_return_code(h_ret);
336 goto init_qp_queue1; 336 goto init_qp_queue1;
337 } 337 }
@@ -345,7 +345,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
345 } else { 345 } else {
346 if (h_ret != H_PAGE_REGISTERED) { 346 if (h_ret != H_PAGE_REGISTERED) {
347 ehca_err(ib_dev, "hipz_qp_register_rpage() " 347 ehca_err(ib_dev, "hipz_qp_register_rpage() "
348 "h_ret=%li", h_ret); 348 "h_ret=%lli", h_ret);
349 ret = ehca2ib_return_code(h_ret); 349 ret = ehca2ib_return_code(h_ret);
350 goto init_qp_queue1; 350 goto init_qp_queue1;
351 } 351 }
@@ -709,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
709 709
710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); 710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
711 if (h_ret != H_SUCCESS) { 711 if (h_ret != H_SUCCESS) {
712 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li", 712 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
713 h_ret); 713 h_ret);
714 ret = ehca2ib_return_code(h_ret); 714 ret = ehca2ib_return_code(h_ret);
715 goto create_qp_exit1; 715 goto create_qp_exit1;
@@ -1010,7 +1010,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
1010 mqpcb, my_qp->galpas.kernel); 1010 mqpcb, my_qp->galpas.kernel);
1011 if (hret != H_SUCCESS) { 1011 if (hret != H_SUCCESS) {
1012 ehca_err(pd->device, "Could not modify SRQ to INIT " 1012 ehca_err(pd->device, "Could not modify SRQ to INIT "
1013 "ehca_qp=%p qp_num=%x h_ret=%li", 1013 "ehca_qp=%p qp_num=%x h_ret=%lli",
1014 my_qp, my_qp->real_qp_num, hret); 1014 my_qp, my_qp->real_qp_num, hret);
1015 goto create_srq2; 1015 goto create_srq2;
1016 } 1016 }
@@ -1024,7 +1024,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
1024 mqpcb, my_qp->galpas.kernel); 1024 mqpcb, my_qp->galpas.kernel);
1025 if (hret != H_SUCCESS) { 1025 if (hret != H_SUCCESS) {
1026 ehca_err(pd->device, "Could not enable SRQ " 1026 ehca_err(pd->device, "Could not enable SRQ "
1027 "ehca_qp=%p qp_num=%x h_ret=%li", 1027 "ehca_qp=%p qp_num=%x h_ret=%lli",
1028 my_qp, my_qp->real_qp_num, hret); 1028 my_qp, my_qp->real_qp_num, hret);
1029 goto create_srq2; 1029 goto create_srq2;
1030 } 1030 }
@@ -1038,7 +1038,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
1038 mqpcb, my_qp->galpas.kernel); 1038 mqpcb, my_qp->galpas.kernel);
1039 if (hret != H_SUCCESS) { 1039 if (hret != H_SUCCESS) {
1040 ehca_err(pd->device, "Could not modify SRQ to RTR " 1040 ehca_err(pd->device, "Could not modify SRQ to RTR "
1041 "ehca_qp=%p qp_num=%x h_ret=%li", 1041 "ehca_qp=%p qp_num=%x h_ret=%lli",
1042 my_qp, my_qp->real_qp_num, hret); 1042 my_qp, my_qp->real_qp_num, hret);
1043 goto create_srq2; 1043 goto create_srq2;
1044 } 1044 }
@@ -1078,7 +1078,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
1078 &bad_send_wqe_p, NULL, 2); 1078 &bad_send_wqe_p, NULL, 2);
1079 if (h_ret != H_SUCCESS) { 1079 if (h_ret != H_SUCCESS) {
1080 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed" 1080 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
1081 " ehca_qp=%p qp_num=%x h_ret=%li", 1081 " ehca_qp=%p qp_num=%x h_ret=%lli",
1082 my_qp, qp_num, h_ret); 1082 my_qp, qp_num, h_ret);
1083 return ehca2ib_return_code(h_ret); 1083 return ehca2ib_return_code(h_ret);
1084 } 1084 }
@@ -1134,7 +1134,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
1134 1134
1135 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { 1135 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
1136 ehca_gen_err("Invalid offset for calculating left cqes " 1136 ehca_gen_err("Invalid offset for calculating left cqes "
1137 "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v); 1137 "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
1138 return -EFAULT; 1138 return -EFAULT;
1139 } 1139 }
1140 1140
@@ -1168,7 +1168,7 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
1168 &send_wqe_p, &recv_wqe_p, 4); 1168 &send_wqe_p, &recv_wqe_p, 4);
1169 if (h_ret != H_SUCCESS) { 1169 if (h_ret != H_SUCCESS) {
1170 ehca_err(&shca->ib_device, "disable_and_get_wqe() " 1170 ehca_err(&shca->ib_device, "disable_and_get_wqe() "
1171 "failed ehca_qp=%p qp_num=%x h_ret=%li", 1171 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
1172 my_qp, qp_num, h_ret); 1172 my_qp, qp_num, h_ret);
1173 return ehca2ib_return_code(h_ret); 1173 return ehca2ib_return_code(h_ret);
1174 } 1174 }
@@ -1261,7 +1261,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1261 mqpcb, my_qp->galpas.kernel); 1261 mqpcb, my_qp->galpas.kernel);
1262 if (h_ret != H_SUCCESS) { 1262 if (h_ret != H_SUCCESS) {
1263 ehca_err(ibqp->device, "hipz_h_query_qp() failed " 1263 ehca_err(ibqp->device, "hipz_h_query_qp() failed "
1264 "ehca_qp=%p qp_num=%x h_ret=%li", 1264 "ehca_qp=%p qp_num=%x h_ret=%lli",
1265 my_qp, ibqp->qp_num, h_ret); 1265 my_qp, ibqp->qp_num, h_ret);
1266 ret = ehca2ib_return_code(h_ret); 1266 ret = ehca2ib_return_code(h_ret);
1267 goto modify_qp_exit1; 1267 goto modify_qp_exit1;
@@ -1690,7 +1690,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1690 1690
1691 if (h_ret != H_SUCCESS) { 1691 if (h_ret != H_SUCCESS) {
1692 ret = ehca2ib_return_code(h_ret); 1692 ret = ehca2ib_return_code(h_ret);
1693 ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li " 1693 ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
1694 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); 1694 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
1695 goto modify_qp_exit2; 1695 goto modify_qp_exit2;
1696 } 1696 }
@@ -1723,7 +1723,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
1723 ret = ehca2ib_return_code(h_ret); 1723 ret = ehca2ib_return_code(h_ret);
1724 ehca_err(ibqp->device, "ENABLE in context of " 1724 ehca_err(ibqp->device, "ENABLE in context of "
1725 "RESET_2_INIT failed! Maybe you didn't get " 1725 "RESET_2_INIT failed! Maybe you didn't get "
1726 "a LID h_ret=%li ehca_qp=%p qp_num=%x", 1726 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
1727 h_ret, my_qp, ibqp->qp_num); 1727 h_ret, my_qp, ibqp->qp_num);
1728 goto modify_qp_exit2; 1728 goto modify_qp_exit2;
1729 } 1729 }
@@ -1909,7 +1909,7 @@ int ehca_query_qp(struct ib_qp *qp,
1909 if (h_ret != H_SUCCESS) { 1909 if (h_ret != H_SUCCESS) {
1910 ret = ehca2ib_return_code(h_ret); 1910 ret = ehca2ib_return_code(h_ret);
1911 ehca_err(qp->device, "hipz_h_query_qp() failed " 1911 ehca_err(qp->device, "hipz_h_query_qp() failed "
1912 "ehca_qp=%p qp_num=%x h_ret=%li", 1912 "ehca_qp=%p qp_num=%x h_ret=%lli",
1913 my_qp, qp->qp_num, h_ret); 1913 my_qp, qp->qp_num, h_ret);
1914 goto query_qp_exit1; 1914 goto query_qp_exit1;
1915 } 1915 }
@@ -2074,7 +2074,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
2074 2074
2075 if (h_ret != H_SUCCESS) { 2075 if (h_ret != H_SUCCESS) {
2076 ret = ehca2ib_return_code(h_ret); 2076 ret = ehca2ib_return_code(h_ret);
2077 ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li " 2077 ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
2078 "ehca_qp=%p qp_num=%x", 2078 "ehca_qp=%p qp_num=%x",
2079 h_ret, my_qp, my_qp->real_qp_num); 2079 h_ret, my_qp, my_qp->real_qp_num);
2080 } 2080 }
@@ -2108,7 +2108,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
2108 if (h_ret != H_SUCCESS) { 2108 if (h_ret != H_SUCCESS) {
2109 ret = ehca2ib_return_code(h_ret); 2109 ret = ehca2ib_return_code(h_ret);
2110 ehca_err(srq->device, "hipz_h_query_qp() failed " 2110 ehca_err(srq->device, "hipz_h_query_qp() failed "
2111 "ehca_qp=%p qp_num=%x h_ret=%li", 2111 "ehca_qp=%p qp_num=%x h_ret=%lli",
2112 my_qp, my_qp->real_qp_num, h_ret); 2112 my_qp, my_qp->real_qp_num, h_ret);
2113 goto query_srq_exit1; 2113 goto query_srq_exit1;
2114 } 2114 }
@@ -2179,7 +2179,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
2179 2179
2180 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 2180 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
2181 if (h_ret != H_SUCCESS) { 2181 if (h_ret != H_SUCCESS) {
2182 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " 2182 ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
2183 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); 2183 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
2184 return ehca2ib_return_code(h_ret); 2184 return ehca2ib_return_code(h_ret);
2185 } 2185 }
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index c7112686782f..5a3d96f84c79 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -822,7 +822,7 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
822 offset = qmap->next_wqe_idx * ipz_queue->qe_size; 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size;
823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
824 if (!wqe) { 824 if (!wqe) {
825 ehca_err(cq->device, "Invalid wqe offset=%#lx on " 825 ehca_err(cq->device, "Invalid wqe offset=%#llx on "
826 "qp_num=%#x", offset, my_qp->real_qp_num); 826 "qp_num=%#x", offset, my_qp->real_qp_num);
827 return nr; 827 return nr;
828 } 828 }
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 706d97ad5555..44447aaa5501 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -85,7 +85,7 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
85 85
86 if (ret != H_SUCCESS) { 86 if (ret != H_SUCCESS) {
87 ehca_err(&shca->ib_device, 87 ehca_err(&shca->ib_device,
88 "Can't define AQP1 for port %x. h_ret=%li", 88 "Can't define AQP1 for port %x. h_ret=%lli",
89 port, ret); 89 port, ret);
90 return ret; 90 return ret;
91 } 91 }
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 21f7d06f14ad..f09914cccf53 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -116,7 +116,7 @@ extern int ehca_debug_level;
116 unsigned char *deb = (unsigned char *)(adr); \ 116 unsigned char *deb = (unsigned char *)(adr); \
117 for (x = 0; x < l; x += 16) { \ 117 for (x = 0; x < l; x += 16) { \
118 printk(KERN_INFO "EHCA_DMP:%s " format \ 118 printk(KERN_INFO "EHCA_DMP:%s " format \
119 " adr=%p ofs=%04x %016lx %016lx\n", \ 119 " adr=%p ofs=%04x %016llx %016llx\n", \
120 __func__, ##args, deb, x, \ 120 __func__, ##args, deb, x, \
121 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 121 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
122 deb += 16; \ 122 deb += 16; \
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e43ed8f8a0c8..3cb688d29131 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -114,7 +114,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
114 114
115 physical = galpas->user.fw_handle; 115 physical = galpas->user.fw_handle;
116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
117 ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical); 117 ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
118 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ 118 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */
119 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, 119 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
120 vma->vm_page_prot); 120 vma->vm_page_prot);
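The ehca message conversions above track powerpc's switch of u64 to unsigned long long: every u64 value formerly printed with %lx/%li now needs the ll length modifier, otherwise the compiler warns about a format/type mismatch. A minimal sketch of the resulting pattern, assuming only the standard printk format rules (demo function, not ehca code):

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: with u64 == unsigned long long, %llx/%lli are the matching
 * specifiers; %lx/%li would now trigger format warnings. */
static void demo_log_handle(u64 fw_handle, u64 h_ret)
{
	printk(KERN_INFO "fw_handle=%llx h_ret=%lli\n",
	       fw_handle, (long long)h_ret);
}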
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 415d3a465de6..d0ab0c0d5e91 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -226,7 +226,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
226 u32 *eq_ist) 226 u32 *eq_ist)
227{ 227{
228 u64 ret; 228 u64 ret;
229 u64 outs[PLPAR_HCALL9_BUFSIZE]; 229 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
230 u64 allocate_controls; 230 u64 allocate_controls;
231 231
232 /* resource type */ 232 /* resource type */
@@ -249,7 +249,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
249 *eq_ist = (u32)outs[5]; 249 *eq_ist = (u32)outs[5];
250 250
251 if (ret == H_NOT_ENOUGH_RESOURCES) 251 if (ret == H_NOT_ENOUGH_RESOURCES)
252 ehca_gen_err("Not enough resource - ret=%li ", ret); 252 ehca_gen_err("Not enough resource - ret=%lli ", ret);
253 253
254 return ret; 254 return ret;
255} 255}
@@ -270,7 +270,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
270 struct ehca_alloc_cq_parms *param) 270 struct ehca_alloc_cq_parms *param)
271{ 271{
272 u64 ret; 272 u64 ret;
273 u64 outs[PLPAR_HCALL9_BUFSIZE]; 273 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
274 274
275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
276 adapter_handle.handle, /* r4 */ 276 adapter_handle.handle, /* r4 */
@@ -287,7 +287,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); 287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
288 288
289 if (ret == H_NOT_ENOUGH_RESOURCES) 289 if (ret == H_NOT_ENOUGH_RESOURCES)
290 ehca_gen_err("Not enough resources. ret=%li", ret); 290 ehca_gen_err("Not enough resources. ret=%lli", ret);
291 291
292 return ret; 292 return ret;
293} 293}
@@ -297,7 +297,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
297{ 297{
298 u64 ret; 298 u64 ret;
299 u64 allocate_controls, max_r10_reg, r11, r12; 299 u64 allocate_controls, max_r10_reg, r11, r12;
300 u64 outs[PLPAR_HCALL9_BUFSIZE]; 300 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
301 301
302 allocate_controls = 302 allocate_controls =
303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type) 303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
@@ -362,7 +362,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); 362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
363 363
364 if (ret == H_NOT_ENOUGH_RESOURCES) 364 if (ret == H_NOT_ENOUGH_RESOURCES)
365 ehca_gen_err("Not enough resources. ret=%li", ret); 365 ehca_gen_err("Not enough resources. ret=%lli", ret);
366 366
367 return ret; 367 return ret;
368} 368}
@@ -454,7 +454,7 @@ u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
454 const u64 count) 454 const u64 count)
455{ 455{
456 if (count != 1) { 456 if (count != 1) {
457 ehca_gen_err("Ppage counter=%lx", count); 457 ehca_gen_err("Ppage counter=%llx", count);
458 return H_PARAMETER; 458 return H_PARAMETER;
459 } 459 }
460 return hipz_h_register_rpage(adapter_handle, 460 return hipz_h_register_rpage(adapter_handle,
@@ -489,7 +489,7 @@ u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
489 const struct h_galpa gal) 489 const struct h_galpa gal)
490{ 490{
491 if (count != 1) { 491 if (count != 1) {
492 ehca_gen_err("Page counter=%lx", count); 492 ehca_gen_err("Page counter=%llx", count);
493 return H_PARAMETER; 493 return H_PARAMETER;
494 } 494 }
495 495
@@ -508,7 +508,7 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
508 const struct h_galpa galpa) 508 const struct h_galpa galpa)
509{ 509{
510 if (count > 1) { 510 if (count > 1) {
511 ehca_gen_err("Page counter=%lx", count); 511 ehca_gen_err("Page counter=%llx", count);
512 return H_PARAMETER; 512 return H_PARAMETER;
513 } 513 }
514 514
@@ -525,7 +525,7 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
525 int dis_and_get_function_code) 525 int dis_and_get_function_code)
526{ 526{
527 u64 ret; 527 u64 ret;
528 u64 outs[PLPAR_HCALL9_BUFSIZE]; 528 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
529 529
530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, 530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
531 adapter_handle.handle, /* r4 */ 531 adapter_handle.handle, /* r4 */
@@ -548,7 +548,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
548 struct h_galpa gal) 548 struct h_galpa gal)
549{ 549{
550 u64 ret; 550 u64 ret;
551 u64 outs[PLPAR_HCALL9_BUFSIZE]; 551 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, 552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
553 adapter_handle.handle, /* r4 */ 553 adapter_handle.handle, /* r4 */
554 qp_handle.handle, /* r5 */ 554 qp_handle.handle, /* r5 */
@@ -557,7 +557,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
557 0, 0, 0, 0, 0); 557 0, 0, 0, 0, 0);
558 558
559 if (ret == H_NOT_ENOUGH_RESOURCES) 559 if (ret == H_NOT_ENOUGH_RESOURCES)
560 ehca_gen_err("Insufficient resources ret=%li", ret); 560 ehca_gen_err("Insufficient resources ret=%lli", ret);
561 561
562 return ret; 562 return ret;
563} 563}
@@ -579,7 +579,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
579 struct ehca_qp *qp) 579 struct ehca_qp *qp)
580{ 580{
581 u64 ret; 581 u64 ret;
582 u64 outs[PLPAR_HCALL9_BUFSIZE]; 582 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
583 583
584 ret = hcp_galpas_dtor(&qp->galpas); 584 ret = hcp_galpas_dtor(&qp->galpas);
585 if (ret) { 585 if (ret) {
@@ -593,7 +593,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
593 qp->ipz_qp_handle.handle, /* r6 */ 593 qp->ipz_qp_handle.handle, /* r6 */
594 0, 0, 0, 0, 0, 0); 594 0, 0, 0, 0, 0, 0);
595 if (ret == H_HARDWARE) 595 if (ret == H_HARDWARE)
596 ehca_gen_err("HCA not operational. ret=%li", ret); 596 ehca_gen_err("HCA not operational. ret=%lli", ret);
597 597
598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, 598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
599 adapter_handle.handle, /* r4 */ 599 adapter_handle.handle, /* r4 */
@@ -601,7 +601,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
601 0, 0, 0, 0, 0); 601 0, 0, 0, 0, 0);
602 602
603 if (ret == H_RESOURCE) 603 if (ret == H_RESOURCE)
604 ehca_gen_err("Resource still in use. ret=%li", ret); 604 ehca_gen_err("Resource still in use. ret=%lli", ret);
605 605
606 return ret; 606 return ret;
607} 607}
@@ -625,7 +625,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
625 u32 * bma_qp_nr) 625 u32 * bma_qp_nr)
626{ 626{
627 u64 ret; 627 u64 ret;
628 u64 outs[PLPAR_HCALL9_BUFSIZE]; 628 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
629 629
630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, 630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
631 adapter_handle.handle, /* r4 */ 631 adapter_handle.handle, /* r4 */
@@ -636,7 +636,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
636 *bma_qp_nr = (u32)outs[1]; 636 *bma_qp_nr = (u32)outs[1];
637 637
638 if (ret == H_ALIAS_EXIST) 638 if (ret == H_ALIAS_EXIST)
639 ehca_gen_err("AQP1 already exists. ret=%li", ret); 639 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
640 640
641 return ret; 641 return ret;
642} 642}
@@ -658,7 +658,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
658 0, 0); 658 0, 0);
659 659
660 if (ret == H_NOT_ENOUGH_RESOURCES) 660 if (ret == H_NOT_ENOUGH_RESOURCES)
661 ehca_gen_err("Not enough resources. ret=%li", ret); 661 ehca_gen_err("Not enough resources. ret=%lli", ret);
662 662
663 return ret; 663 return ret;
664} 664}
@@ -697,7 +697,7 @@ u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
697 0, 0, 0, 0); 697 0, 0, 0, 0);
698 698
699 if (ret == H_RESOURCE) 699 if (ret == H_RESOURCE)
700 ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret); 700 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
701 701
702 return ret; 702 return ret;
703} 703}
@@ -719,7 +719,7 @@ u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
719 0, 0, 0, 0, 0); 719 0, 0, 0, 0, 0);
720 720
721 if (ret == H_RESOURCE) 721 if (ret == H_RESOURCE)
722 ehca_gen_err("Resource in use. ret=%li ", ret); 722 ehca_gen_err("Resource in use. ret=%lli ", ret);
723 723
724 return ret; 724 return ret;
725} 725}
@@ -733,7 +733,7 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
733 struct ehca_mr_hipzout_parms *outparms) 733 struct ehca_mr_hipzout_parms *outparms)
734{ 734{
735 u64 ret; 735 u64 ret;
736 u64 outs[PLPAR_HCALL9_BUFSIZE]; 736 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
737 737
738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
739 adapter_handle.handle, /* r4 */ 739 adapter_handle.handle, /* r4 */
@@ -774,9 +774,9 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
774 774
775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { 775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
776 ehca_gen_err("logical_address_of_page not on a 4k boundary " 776 ehca_gen_err("logical_address_of_page not on a 4k boundary "
777 "adapter_handle=%lx mr=%p mr_handle=%lx " 777 "adapter_handle=%llx mr=%p mr_handle=%llx "
778 "pagesize=%x queue_type=%x " 778 "pagesize=%x queue_type=%x "
779 "logical_address_of_page=%lx count=%lx", 779 "logical_address_of_page=%llx count=%llx",
780 adapter_handle.handle, mr, 780 adapter_handle.handle, mr,
781 mr->ipz_mr_handle.handle, pagesize, queue_type, 781 mr->ipz_mr_handle.handle, pagesize, queue_type,
782 logical_address_of_page, count); 782 logical_address_of_page, count);
@@ -794,7 +794,7 @@ u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
794 struct ehca_mr_hipzout_parms *outparms) 794 struct ehca_mr_hipzout_parms *outparms)
795{ 795{
796 u64 ret; 796 u64 ret;
797 u64 outs[PLPAR_HCALL9_BUFSIZE]; 797 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
798 798
799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs, 799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
800 adapter_handle.handle, /* r4 */ 800 adapter_handle.handle, /* r4 */
@@ -828,7 +828,7 @@ u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
828 struct ehca_mr_hipzout_parms *outparms) 828 struct ehca_mr_hipzout_parms *outparms)
829{ 829{
830 u64 ret; 830 u64 ret;
831 u64 outs[PLPAR_HCALL9_BUFSIZE]; 831 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
832 832
833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, 833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
834 adapter_handle.handle, /* r4 */ 834 adapter_handle.handle, /* r4 */
@@ -855,7 +855,7 @@ u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
855 struct ehca_mr_hipzout_parms *outparms) 855 struct ehca_mr_hipzout_parms *outparms)
856{ 856{
857 u64 ret; 857 u64 ret;
858 u64 outs[PLPAR_HCALL9_BUFSIZE]; 858 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
859 859
860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, 860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
861 adapter_handle.handle, /* r4 */ 861 adapter_handle.handle, /* r4 */
@@ -877,7 +877,7 @@ u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
877 struct ehca_mw_hipzout_parms *outparms) 877 struct ehca_mw_hipzout_parms *outparms)
878{ 878{
879 u64 ret; 879 u64 ret;
880 u64 outs[PLPAR_HCALL9_BUFSIZE]; 880 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
881 881
882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
883 adapter_handle.handle, /* r4 */ 883 adapter_handle.handle, /* r4 */
@@ -895,7 +895,7 @@ u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
895 struct ehca_mw_hipzout_parms *outparms) 895 struct ehca_mw_hipzout_parms *outparms)
896{ 896{
897 u64 ret; 897 u64 ret;
898 u64 outs[PLPAR_HCALL9_BUFSIZE]; 898 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
899 899
900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs, 900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
901 adapter_handle.handle, /* r4 */ 901 adapter_handle.handle, /* r4 */
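The other recurring hcp_if.c change swaps the hcall output buffers from u64[] to unsigned long[]: plpar_hcall9() is declared with an unsigned long *retbuf parameter, and once u64 is unsigned long long the two pointer types are no longer compatible, even though both point at 64-bit storage on ppc64. A hedged sketch of the corrected calling pattern (demo_hcall and its single output are illustrative, not ehca functions):

#include <linux/types.h>
#include <asm/hvcall.h>		/* plpar_hcall9(), PLPAR_HCALL9_BUFSIZE */

/* Sketch: the return buffer uses the prototype's own type; the
 * individual values can still be assigned to u64 fields afterwards. */
static long demo_hcall(unsigned long opcode, u64 *first_out)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
	long rc;

	rc = plpar_hcall9(opcode, outs, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	*first_out = outs[0];	/* widening assignment is fine */
	return rc;
}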
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 39167a797f99..a91cb4c3fa5c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1462,7 +1462,8 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1462} 1462}
1463 1463
1464static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1464static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1465 struct mlx4_ib_qp *qp, unsigned *lso_seg_len) 1465 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
1466 __be32 *lso_hdr_sz)
1466{ 1467{
1467 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1468 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
1468 1469
@@ -1479,12 +1480,8 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1479 1480
1480 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 1481 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
1481 1482
1482 /* make sure LSO header is written before overwriting stamping */ 1483 *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1483 wmb(); 1484 wr->wr.ud.hlen);
1484
1485 wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1486 wr->wr.ud.hlen);
1487
1488 *lso_seg_len = halign; 1485 *lso_seg_len = halign;
1489 return 0; 1486 return 0;
1490} 1487}
@@ -1518,6 +1515,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1518 int uninitialized_var(stamp); 1515 int uninitialized_var(stamp);
1519 int uninitialized_var(size); 1516 int uninitialized_var(size);
1520 unsigned uninitialized_var(seglen); 1517 unsigned uninitialized_var(seglen);
1518 __be32 dummy;
1519 __be32 *lso_wqe;
1520 __be32 uninitialized_var(lso_hdr_sz);
1521 int i; 1521 int i;
1522 1522
1523 spin_lock_irqsave(&qp->sq.lock, flags); 1523 spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1525,6 +1525,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1525 ind = qp->sq_next_wqe; 1525 ind = qp->sq_next_wqe;
1526 1526
1527 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1527 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1528 lso_wqe = &dummy;
1529
1528 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1530 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1529 err = -ENOMEM; 1531 err = -ENOMEM;
1530 *bad_wr = wr; 1532 *bad_wr = wr;
@@ -1606,11 +1608,12 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1606 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1608 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1607 1609
1608 if (wr->opcode == IB_WR_LSO) { 1610 if (wr->opcode == IB_WR_LSO) {
1609 err = build_lso_seg(wqe, wr, qp, &seglen); 1611 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
1610 if (unlikely(err)) { 1612 if (unlikely(err)) {
1611 *bad_wr = wr; 1613 *bad_wr = wr;
1612 goto out; 1614 goto out;
1613 } 1615 }
1616 lso_wqe = (__be32 *) wqe;
1614 wqe += seglen; 1617 wqe += seglen;
1615 size += seglen / 16; 1618 size += seglen / 16;
1616 } 1619 }
@@ -1652,6 +1655,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1652 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 1655 for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
1653 set_data_seg(dseg, wr->sg_list + i); 1656 set_data_seg(dseg, wr->sg_list + i);
1654 1657
1658 /*
1659 * Possibly overwrite stamping in cacheline with LSO
1660 * segment only after making sure all data segments
1661 * are written.
1662 */
1663 wmb();
1664 *lso_wqe = lso_hdr_sz;
1665
1655 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1666 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
1656 MLX4_WQE_CTRL_FENCE : 0) | size; 1667 MLX4_WQE_CTRL_FENCE : 0) | size;
1657 1668
@@ -1686,7 +1697,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1686 stamp_send_wqe(qp, stamp, size * 16); 1697 stamp_send_wqe(qp, stamp, size * 16);
1687 ind = pad_wraparound(qp, ind); 1698 ind = pad_wraparound(qp, ind);
1688 } 1699 }
1689
1690 } 1700 }
1691 1701
1692out: 1702out:
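The mlx4 change above is an ordering fix: build_lso_seg() no longer writes the mss/header-size word itself; the caller records where that word lives (lso_wqe) and stores it only after all data segments are written, with a wmb() in between, so the WQE's stamping bytes are not overwritten too early. A generic, hedged sketch of this fill-payload-first, publish-header-last pattern (struct and names are illustrative, not mlx4 types):

#include <linux/types.h>
#include <asm/barrier.h>	/* wmb(); <asm/system.h> in older trees */

struct demo_desc {
	u32 ctrl;		/* hardware treats the entry as invalid until set */
	u32 data[7];
};

/* Sketch: write every payload word, order the stores with wmb(), and
 * only then store the control word -- the analogue of *lso_wqe = lso_hdr_sz. */
static void demo_post(struct demo_desc *d, const u32 *payload, u32 ctrl_val)
{
	int i;

	for (i = 0; i < 7; i++)
		d->data[i] = payload[i];

	wmb();
	d->ctrl = ctrl_val;
}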
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index dce0443f9d69..0bd2a4ff0842 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,23 +106,17 @@ int ipoib_open(struct net_device *dev)
106 106
107 ipoib_dbg(priv, "bringing up interface\n"); 107 ipoib_dbg(priv, "bringing up interface\n");
108 108
109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 109 if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
110 napi_enable(&priv->napi);
110 111
111 if (ipoib_pkey_dev_delay_open(dev)) 112 if (ipoib_pkey_dev_delay_open(dev))
112 return 0; 113 return 0;
113 114
114 napi_enable(&priv->napi); 115 if (ipoib_ib_dev_open(dev))
116 goto err_disable;
115 117
116 if (ipoib_ib_dev_open(dev)) { 118 if (ipoib_ib_dev_up(dev))
117 napi_disable(&priv->napi); 119 goto err_stop;
118 return -EINVAL;
119 }
120
121 if (ipoib_ib_dev_up(dev)) {
122 ipoib_ib_dev_stop(dev, 1);
123 napi_disable(&priv->napi);
124 return -EINVAL;
125 }
126 120
127 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 121 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
128 struct ipoib_dev_priv *cpriv; 122 struct ipoib_dev_priv *cpriv;
@@ -144,6 +138,15 @@ int ipoib_open(struct net_device *dev)
144 netif_start_queue(dev); 138 netif_start_queue(dev);
145 139
146 return 0; 140 return 0;
141
142err_stop:
143 ipoib_ib_dev_stop(dev, 1);
144
145err_disable:
146 napi_disable(&priv->napi);
147 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
148
149 return -EINVAL;
147} 150}
148 151
149static int ipoib_stop(struct net_device *dev) 152static int ipoib_stop(struct net_device *dev)
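The ipoib_open() rework above replaces per-branch cleanup with the usual reverse-order goto unwind, and uses test_and_set_bit() so NAPI is only enabled on the actual down-to-up transition. A hedged, simplified sketch of the same unwind shape (the hypothetical demo_hw_* helpers stand in for the IPoIB calls, and the early return is a simplification of the real flag handling):

#include <linux/bitops.h>

#define DEMO_FLAG_UP	0

struct demo_dev {
	unsigned long flags;
};

int demo_hw_open(struct demo_dev *d);	/* hypothetical helpers */
int demo_hw_up(struct demo_dev *d);
void demo_hw_close(struct demo_dev *d);

static int demo_open(struct demo_dev *d)
{
	int err;

	if (test_and_set_bit(DEMO_FLAG_UP, &d->flags))
		return 0;		/* already up */

	err = demo_hw_open(d);
	if (err)
		goto err_clear;

	err = demo_hw_up(d);
	if (err)
		goto err_close;

	return 0;

err_close:
	demo_hw_close(d);
err_clear:
	clear_bit(DEMO_FLAG_UP, &d->flags);
	return err;
}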
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 59d02e0b8df1..425e31112ed7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -409,7 +409,7 @@ static int ipoib_mcast_join_complete(int status,
409 } 409 }
410 410
411 if (mcast->logcount++ < 20) { 411 if (mcast->logcount++ < 20) {
412 if (status == -ETIMEDOUT) { 412 if (status == -ETIMEDOUT || status == -EAGAIN) {
413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
414 mcast->mcmember.mgid.raw, status); 414 mcast->mcmember.mgid.raw, status);
415 } else { 415 } else {
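The multicast change is only a log-level decision: -EAGAIN from the SA is as routine, and as retried, as -ETIMEDOUT, so it moves to the debug path instead of producing a warning. A small hedged sketch of that split (demo names, not the ipoib helpers):

#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch: expected transient failures that will be retried stay quiet;
 * anything unexpected is still surfaced as a warning. */
static void demo_log_join_status(int status)
{
	if (status == -ETIMEDOUT || status == -EAGAIN)
		pr_debug("multicast join failed, status %d (will retry)\n",
			 status);
	else
		printk(KERN_WARNING "multicast join failed, status %d\n",
		       status);
}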
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 2cf1a4088718..5a76a5510350 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -61,6 +61,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
61 61
62 ppriv = netdev_priv(pdev); 62 ppriv = netdev_priv(pdev);
63 63
64 rtnl_lock();
64 mutex_lock(&ppriv->vlan_mutex); 65 mutex_lock(&ppriv->vlan_mutex);
65 66
66 /* 67 /*
@@ -111,7 +112,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
111 goto device_init_failed; 112 goto device_init_failed;
112 } 113 }
113 114
114 result = register_netdev(priv->dev); 115 result = register_netdevice(priv->dev);
115 if (result) { 116 if (result) {
116 ipoib_warn(priv, "failed to initialize; error %i", result); 117 ipoib_warn(priv, "failed to initialize; error %i", result);
117 goto register_failed; 118 goto register_failed;
@@ -134,12 +135,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
134 list_add_tail(&priv->list, &ppriv->child_intfs); 135 list_add_tail(&priv->list, &ppriv->child_intfs);
135 136
136 mutex_unlock(&ppriv->vlan_mutex); 137 mutex_unlock(&ppriv->vlan_mutex);
138 rtnl_unlock();
137 139
138 return 0; 140 return 0;
139 141
140sysfs_failed: 142sysfs_failed:
141 ipoib_delete_debug_files(priv->dev); 143 ipoib_delete_debug_files(priv->dev);
142 unregister_netdev(priv->dev); 144 unregister_netdevice(priv->dev);
143 145
144register_failed: 146register_failed:
145 ipoib_dev_cleanup(priv->dev); 147 ipoib_dev_cleanup(priv->dev);
@@ -149,6 +151,7 @@ device_init_failed:
149 151
150err: 152err:
151 mutex_unlock(&ppriv->vlan_mutex); 153 mutex_unlock(&ppriv->vlan_mutex);
154 rtnl_unlock();
152 return result; 155 return result;
153} 156}
154 157
@@ -162,10 +165,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
162 165
163 ppriv = netdev_priv(pdev); 166 ppriv = netdev_priv(pdev);
164 167
168 rtnl_lock();
165 mutex_lock(&ppriv->vlan_mutex); 169 mutex_lock(&ppriv->vlan_mutex);
166 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 170 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
167 if (priv->pkey == pkey) { 171 if (priv->pkey == pkey) {
168 unregister_netdev(priv->dev); 172 unregister_netdevice(priv->dev);
169 ipoib_dev_cleanup(priv->dev); 173 ipoib_dev_cleanup(priv->dev);
170 list_del(&priv->list); 174 list_del(&priv->list);
171 free_netdev(priv->dev); 175 free_netdev(priv->dev);
@@ -175,6 +179,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
175 } 179 }
176 } 180 }
177 mutex_unlock(&ppriv->vlan_mutex); 181 mutex_unlock(&ppriv->vlan_mutex);
182 rtnl_unlock();
178 183
179 return ret; 184 return ret;
180} 185}
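The VLAN add/delete paths now take rtnl_lock() themselves and call register_netdevice()/unregister_netdevice(), the variants that expect the caller to already hold the RTNL lock, so child setup and teardown happen under one RTNL + vlan_mutex section. A hedged sketch of that locking shape (demo_add_child is illustrative):

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: register_netdevice(), unlike register_netdev(), does not take
 * the RTNL lock itself, so the caller holds it across the whole section. */
static int demo_add_child(struct net_device *child, struct mutex *vlan_mutex)
{
	int err;

	rtnl_lock();
	mutex_lock(vlan_mutex);

	err = register_netdevice(child);

	mutex_unlock(vlan_mutex);
	rtnl_unlock();
	return err;
}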
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 7c5f97033b9f..cb8943da4f12 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -292,7 +292,9 @@ isdn_net_unbind_channel(isdn_net_local * lp)
292 lp->dialstate = 0; 292 lp->dialstate = 0;
293 dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; 293 dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
294 dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; 294 dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL;
295 isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); 295 if (lp->isdn_device != -1 && lp->isdn_channel != -1)
296 isdn_free_channel(lp->isdn_device, lp->isdn_channel,
297 ISDN_USAGE_NET);
296 lp->flags &= ~ISDN_NET_CONNECTED; 298 lp->flags &= ~ISDN_NET_CONNECTED;
297 lp->isdn_device = -1; 299 lp->isdn_device = -1;
298 lp->isdn_channel = -1; 300 lp->isdn_channel = -1;
@@ -2513,7 +2515,6 @@ static const struct net_device_ops isdn_netdev_ops = {
2513 .ndo_stop = isdn_net_close, 2515 .ndo_stop = isdn_net_close,
2514 .ndo_do_ioctl = isdn_net_ioctl, 2516 .ndo_do_ioctl = isdn_net_ioctl,
2515 2517
2516 .ndo_validate_addr = NULL,
2517 .ndo_start_xmit = isdn_net_start_xmit, 2518 .ndo_start_xmit = isdn_net_start_xmit,
2518 .ndo_get_stats = isdn_net_get_stats, 2519 .ndo_get_stats = isdn_net_get_stats,
2519 .ndo_tx_timeout = isdn_net_tx_timeout, 2520 .ndo_tx_timeout = isdn_net_tx_timeout,
@@ -2528,12 +2529,8 @@ static void _isdn_setup(struct net_device *dev)
2528 2529
2529 ether_setup(dev); 2530 ether_setup(dev);
2530 2531
2531 dev->flags = IFF_NOARP | IFF_POINTOPOINT;
2532 /* Setup the generic properties */ 2532 /* Setup the generic properties */
2533 dev->mtu = 1500;
2534 dev->flags = IFF_NOARP|IFF_POINTOPOINT; 2533 dev->flags = IFF_NOARP|IFF_POINTOPOINT;
2535 dev->type = ARPHRD_ETHER;
2536 dev->addr_len = ETH_ALEN;
2537 dev->header_ops = NULL; 2534 dev->header_ops = NULL;
2538 dev->netdev_ops = &isdn_netdev_ops; 2535 dev->netdev_ops = &isdn_netdev_ops;
2539 2536
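The isdn_net_unbind_channel() hunk guards the free: the function can run again after the device/channel slots were already reset to -1, and freeing with those sentinel values would release a channel the interface no longer owns. A hedged sketch of the same make-teardown-idempotent idea (demo names, not i4l functions):

struct demo_link {
	int device;
	int channel;
};

void demo_free_channel(int device, int channel);	/* hypothetical */

/* Sketch: mark the slots as released with -1 and skip the free when a
 * second unbind sees the sentinels. */
static void demo_unbind(struct demo_link *lp)
{
	if (lp->device != -1 && lp->channel != -1)
		demo_free_channel(lp->device, lp->channel);

	lp->device = -1;
	lp->channel = -1;
}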
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a3551dd0324d..aa30b5cb3513 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -431,6 +431,7 @@ set_arg(void __user *b, void *val,int len)
431 return 0; 431 return 0;
432} 432}
433 433
434#ifdef CONFIG_IPPP_FILTER
434static int get_filter(void __user *arg, struct sock_filter **p) 435static int get_filter(void __user *arg, struct sock_filter **p)
435{ 436{
436 struct sock_fprog uprog; 437 struct sock_fprog uprog;
@@ -465,6 +466,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
465 *p = code; 466 *p = code;
466 return uprog.len; 467 return uprog.len;
467} 468}
469#endif /* CONFIG_IPPP_FILTER */
468 470
469/* 471/*
470 * ippp device ioctl 472 * ippp device ioctl
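The ippp change compiles get_filter() only when CONFIG_IPPP_FILTER is set, matching its single caller in the ioctl switch and silencing a defined-but-unused warning otherwise. A hedged sketch of keeping a helper and its caller under the same option (the command value and helper are illustrative):

#include <linux/errno.h>

#ifdef CONFIG_IPPP_FILTER
/* Only meaningful when the filter ioctl exists, so only built then. */
static int demo_set_filter(unsigned long arg)
{
	return arg ? 0 : -EINVAL;
}
#endif /* CONFIG_IPPP_FILTER */

static long demo_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
#ifdef CONFIG_IPPP_FILTER
	case 0x10:	/* hypothetical "set filter" command */
		return demo_set_filter(arg);
#endif
	default:
		return -ENOTTY;
	}
}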
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a4a1ae214630..742713611bc5 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -119,13 +119,6 @@ config LEDS_GPIO
119 outputs. To be useful the particular board must have LEDs 119 outputs. To be useful the particular board must have LEDs
120 and they must be connected to the GPIO lines. 120 and they must be connected to the GPIO lines.
121 121
122config LEDS_HP_DISK
123 tristate "LED Support for disk protection LED on HP notebooks"
124 depends on LEDS_CLASS && ACPI
125 help
126 This option enable support for disk protection LED, found on
127 newer HP notebooks.
128
129config LEDS_CLEVO_MAIL 122config LEDS_CLEVO_MAIL
130 tristate "Mail LED on Clevo notebook (EXPERIMENTAL)" 123 tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
131 depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI && EXPERIMENTAL 124 depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI && EXPERIMENTAL
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index bc247cb02e82..9d76f0f160a4 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
23obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 23obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
24obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 24obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
25obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 25obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
26obj-$(CONFIG_LEDS_HP_DISK) += leds-hp-disk.o
27obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 26obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
28 27
29# LED Triggers 28# LED Triggers
diff --git a/drivers/leds/leds-hp-disk.c b/drivers/leds/leds-hp-disk.c
deleted file mode 100644
index d786adc8c5e3..000000000000
--- a/drivers/leds/leds-hp-disk.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * leds-hp-disk.c - driver for HP "hard disk protection" LED
3 *
4 * Copyright (C) 2008 Pavel Machek
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/dmi.h>
24#include <linux/module.h>
25#include <linux/types.h>
26#include <linux/platform_device.h>
27#include <linux/interrupt.h>
28#include <linux/input.h>
29#include <linux/kthread.h>
30#include <linux/leds.h>
31#include <acpi/acpi_drivers.h>
32
33#define DRIVER_NAME "leds-hp-disk"
34#define ACPI_MDPS_CLASS "led"
35
36/* For automatic insertion of the module */
37static struct acpi_device_id hpled_device_ids[] = {
38 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
39 {"", 0},
40};
41MODULE_DEVICE_TABLE(acpi, hpled_device_ids);
42
43struct acpi_hpled {
44 struct acpi_device *device; /* The ACPI device */
45};
46
47static struct acpi_hpled adev;
48
49static acpi_status hpled_acpi_write(acpi_handle handle, int reg)
50{
51 unsigned long long ret; /* Not used when writing */
52 union acpi_object in_obj[1];
53 struct acpi_object_list args = { 1, in_obj };
54
55 in_obj[0].type = ACPI_TYPE_INTEGER;
56 in_obj[0].integer.value = reg;
57
58 return acpi_evaluate_integer(handle, "ALED", &args, &ret);
59}
60
61static void hpled_set(struct led_classdev *led_cdev,
62 enum led_brightness value)
63{
64 hpled_acpi_write(adev.device->handle, !!value);
65}
66
67static struct led_classdev hpled_led = {
68 .name = "hp:red:hddprotection",
69 .default_trigger = "heartbeat",
70 .brightness_set = hpled_set,
71 .flags = LED_CORE_SUSPENDRESUME,
72};
73
74static int hpled_add(struct acpi_device *device)
75{
76 int ret;
77
78 if (!device)
79 return -EINVAL;
80
81 adev.device = device;
82 strcpy(acpi_device_name(device), DRIVER_NAME);
83 strcpy(acpi_device_class(device), ACPI_MDPS_CLASS);
84 device->driver_data = &adev;
85
86 ret = led_classdev_register(NULL, &hpled_led);
87 return ret;
88}
89
90static int hpled_remove(struct acpi_device *device, int type)
91{
92 if (!device)
93 return -EINVAL;
94
95 led_classdev_unregister(&hpled_led);
96 return 0;
97}
98
99
100
101static struct acpi_driver leds_hp_driver = {
102 .name = DRIVER_NAME,
103 .class = ACPI_MDPS_CLASS,
104 .ids = hpled_device_ids,
105 .ops = {
106 .add = hpled_add,
107 .remove = hpled_remove,
108 }
109};
110
111static int __init hpled_init_module(void)
112{
113 int ret;
114
115 if (acpi_disabled)
116 return -ENODEV;
117
118 ret = acpi_bus_register_driver(&leds_hp_driver);
119 if (ret < 0)
120 return ret;
121
122 printk(KERN_INFO DRIVER_NAME " driver loaded.\n");
123
124 return 0;
125}
126
127static void __exit hpled_exit_module(void)
128{
129 acpi_bus_unregister_driver(&leds_hp_driver);
130}
131
132MODULE_DESCRIPTION("Driver for HP disk protection LED");
133MODULE_AUTHOR("Pavel Machek <pavel@suse.cz>");
134MODULE_LICENSE("GPL");
135
136module_init(hpled_init_module);
137module_exit(hpled_exit_module);
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
index 10b6ef758725..11c0f461320e 100644
--- a/drivers/message/fusion/lsi/mpi.h
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -6,7 +6,7 @@
6 * Title: MPI Message independent structures and definitions 6 * Title: MPI Message independent structures and definitions
7 * Creation Date: July 27, 2000 7 * Creation Date: July 27, 2000
8 * 8 *
9 * mpi.h Version: 01.05.13 9 * mpi.h Version: 01.05.16
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -79,6 +79,9 @@
79 * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT. 79 * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
80 * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT. 80 * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
81 * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT. 81 * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
82 * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT.
83 * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT.
84 * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT.
82 * -------------------------------------------------------------------------- 85 * --------------------------------------------------------------------------
83 */ 86 */
84 87
@@ -109,7 +112,7 @@
109/* Note: The major versions of 0xe0 through 0xff are reserved */ 112/* Note: The major versions of 0xe0 through 0xff are reserved */
110 113
111/* versioning for this MPI header set */ 114/* versioning for this MPI header set */
112#define MPI_HEADER_VERSION_UNIT (0x10) 115#define MPI_HEADER_VERSION_UNIT (0x13)
113#define MPI_HEADER_VERSION_DEV (0x00) 116#define MPI_HEADER_VERSION_DEV (0x00)
114#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00) 117#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
115#define MPI_HEADER_VERSION_UNIT_SHIFT (8) 118#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index b2db3330c591..013c7d881948 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -6,7 +6,7 @@
6 * Title: MPI Config message, structures, and Pages 6 * Title: MPI Config message, structures, and Pages
7 * Creation Date: July 27, 2000 7 * Creation Date: July 27, 2000
8 * 8 *
9 * mpi_cnfg.h Version: 01.05.15 9 * mpi_cnfg.h Version: 01.05.18
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -308,6 +308,20 @@
308 * Expander Page 0 Flags field. 308 * Expander Page 0 Flags field.
309 * Fixed define for 309 * Fixed define for
310 * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED. 310 * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
311 * 08-07-07 01.05.16 Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
312 * define.
313 * Added BIOS Page 4 structure.
314 * Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
315 * Physcial Disk Page 1.
316 * 01-15-07 01.05.17 Added additional bit defines for ExtFlags field of
317 * Manufacturing Page 4.
318 * Added Solid State Drives Supported bit to IOC Page 6
319 * Capabilities Flags.
320 * Added new value for AccessStatus field of SAS Device
321 * Page 0 (_SATA_NEEDS_INITIALIZATION).
322 * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field
323 * to control coercion size and the mixing of SAS and SATA
324 * SSD drives.
311 * -------------------------------------------------------------------------- 325 * --------------------------------------------------------------------------
312 */ 326 */
313 327
@@ -686,6 +700,14 @@ typedef struct _CONFIG_PAGE_MANUFACTURING_4
686#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01) 700#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01)
687 701
688/* defines for the ExtFlags field */ 702/* defines for the ExtFlags field */
703#define MPI_MANPAGE4_EXTFLAGS_MASK_COERCION_SIZE (0x0180)
704#define MPI_MANPAGE4_EXTFLAGS_SHIFT_COERCION_SIZE (7)
705#define MPI_MANPAGE4_EXTFLAGS_1GB_COERCION_SIZE (0)
706#define MPI_MANPAGE4_EXTFLAGS_128MB_COERCION_SIZE (1)
707
708#define MPI_MANPAGE4_EXTFLAGS_NO_MIX_SSD_SAS_SATA (0x0040)
709#define MPI_MANPAGE4_EXTFLAGS_MIX_SSD_AND_NON_SSD (0x0020)
710#define MPI_MANPAGE4_EXTFLAGS_DUAL_PORT_SUPPORT (0x0010)
689#define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA (0x0008) 711#define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA (0x0008)
690#define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE (0x0004) 712#define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE (0x0004)
691#define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE (0x0002) 713#define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE (0x0002)
@@ -1159,6 +1181,8 @@ typedef struct _CONFIG_PAGE_IOC_6
1159 1181
1160/* IOC Page 6 Capabilities Flags */ 1182/* IOC Page 6 Capabilities Flags */
1161 1183
1184#define MPI_IOCPAGE6_CAP_FLAGS_SSD_SUPPORT (0x00000020)
1185#define MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT (0x00000010)
1162#define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING (0x00000008) 1186#define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING (0x00000008)
1163 1187
1164#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE (0x00000006) 1188#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE (0x00000006)
@@ -1428,6 +1452,15 @@ typedef struct _CONFIG_PAGE_BIOS_2
1428#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05) 1452#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05)
1429#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) 1453#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
1430 1454
1455typedef struct _CONFIG_PAGE_BIOS_4
1456{
1457 CONFIG_PAGE_HEADER Header; /* 00h */
1458 U64 ReassignmentBaseWWID; /* 04h */
1459} CONFIG_PAGE_BIOS_4, MPI_POINTER PTR_CONFIG_PAGE_BIOS_4,
1460 BIOSPage4_t, MPI_POINTER pBIOSPage4_t;
1461
1462#define MPI_BIOSPAGE4_PAGEVERSION (0x00)
1463
1431 1464
1432/**************************************************************************** 1465/****************************************************************************
1433* SCSI Port Config Pages 1466* SCSI Port Config Pages
@@ -2419,6 +2452,15 @@ typedef struct _RAID_PHYS_DISK1_PATH
2419#define MPI_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) 2452#define MPI_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
2420#define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001) 2453#define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
2421 2454
2455
2456/*
2457 * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
2458 * one and check Header.PageLength or NumPhysDiskPaths at runtime.
2459 */
2460#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
2461#define MPI_RAID_PHYS_DISK1_PATH_MAX (1)
2462#endif
2463
2422typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1 2464typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
2423{ 2465{
2424 CONFIG_PAGE_HEADER Header; /* 00h */ 2466 CONFIG_PAGE_HEADER Header; /* 00h */
@@ -2426,7 +2468,7 @@ typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
2426 U8 PhysDiskNum; /* 05h */ 2468 U8 PhysDiskNum; /* 05h */
2427 U16 Reserved2; /* 06h */ 2469 U16 Reserved2; /* 06h */
2428 U32 Reserved1; /* 08h */ 2470 U32 Reserved1; /* 08h */
2429 RAID_PHYS_DISK1_PATH Path[1]; /* 0Ch */ 2471 RAID_PHYS_DISK1_PATH Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
2430} CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1, 2472} CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
2431 RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t; 2473 RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
2432 2474
@@ -2844,6 +2886,7 @@ typedef struct _CONFIG_PAGE_SAS_DEVICE_0
2844#define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) 2886#define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
2845#define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) 2887#define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
2846#define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) 2888#define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
2889#define MPI_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
2847/* specific values for SATA Init failures */ 2890/* specific values for SATA Init failures */
2848#define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) 2891#define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
2849#define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) 2892#define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
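Among the mpi_cnfg.h additions, the RAID Physical Disk Page 1 change replaces the hard-coded Path[1] with Path[MPI_RAID_PHYS_DISK1_PATH_MAX], an overridable bound; the accompanying comment tells host code to leave the define at 1 and trust Header.PageLength or NumPhysDiskPaths at runtime. A hedged sketch of allocating such a page from the live count rather than the struct's placeholder array (demo helper only):

#include <linux/slab.h>
#include <linux/stddef.h>	/* offsetof() */

/* Sketch: size the buffer from the firmware-reported path count; the
 * Path[] bound in the struct definition is only a compile-time placeholder. */
static CONFIG_PAGE_RAID_PHYS_DISK_1 *demo_alloc_pg1(unsigned int num_paths)
{
	size_t sz = offsetof(CONFIG_PAGE_RAID_PHYS_DISK_1, Path) +
		    num_paths * sizeof(RAID_PHYS_DISK1_PATH);

	return kzalloc(sz, GFP_KERNEL);
}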
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
index 627acfbb8623..7d663ce76f8c 100644
--- a/drivers/message/fusion/lsi/mpi_fc.h
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_fc.h 5 * Name: mpi_fc.h
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index 3f15fcfe4a2e..693e4b511354 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -3,28 +3,28 @@
3 MPI Header File Change History 3 MPI Header File Change History
4 ============================== 4 ==============================
5 5
6 Copyright (c) 2000-2007 LSI Corporation. 6 Copyright (c) 2000-2008 LSI Corporation.
7 7
8 --------------------------------------- 8 ---------------------------------------
9 Header Set Release Version: 01.05.16 9 Header Set Release Version: 01.05.19
10 Header Set Release Date: 05-24-07 10 Header Set Release Date: 03-28-08
11 --------------------------------------- 11 ---------------------------------------
12 12
13 Filename Current version Prior version 13 Filename Current version Prior version
14 ---------- --------------- ------------- 14 ---------- --------------- -------------
15 mpi.h 01.05.13 01.05.12 15 mpi.h 01.05.16 01.05.15
16 mpi_ioc.h 01.05.14 01.05.13 16 mpi_ioc.h 01.05.16 01.05.15
17 mpi_cnfg.h 01.05.15 01.05.14 17 mpi_cnfg.h 01.05.18 01.05.17
18 mpi_init.h 01.05.09 01.05.09 18 mpi_init.h 01.05.09 01.05.09
19 mpi_targ.h 01.05.06 01.05.06 19 mpi_targ.h 01.05.06 01.05.06
20 mpi_fc.h 01.05.01 01.05.01 20 mpi_fc.h 01.05.01 01.05.01
21 mpi_lan.h 01.05.01 01.05.01 21 mpi_lan.h 01.05.01 01.05.01
22 mpi_raid.h 01.05.03 01.05.03 22 mpi_raid.h 01.05.05 01.05.05
23 mpi_tool.h 01.05.03 01.05.03 23 mpi_tool.h 01.05.03 01.05.03
24 mpi_inb.h 01.05.01 01.05.01 24 mpi_inb.h 01.05.01 01.05.01
25 mpi_sas.h 01.05.04 01.05.04 25 mpi_sas.h 01.05.05 01.05.05
26 mpi_type.h 01.05.02 01.05.02 26 mpi_type.h 01.05.02 01.05.02
27 mpi_history.txt 01.05.14 01.05.14 27 mpi_history.txt 01.05.19 01.05.18
28 28
29 29
30 * Date Version Description 30 * Date Version Description
@@ -96,6 +96,9 @@ mpi.h
96 * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT. 96 * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
97 * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT. 97 * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
98 * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT. 98 * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
99 * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT.
100 * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT.
101 * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT.
99 * -------------------------------------------------------------------------- 102 * --------------------------------------------------------------------------
100 103
101mpi_ioc.h 104mpi_ioc.h
@@ -127,7 +130,7 @@ mpi_ioc.h
127 * 08-08-01 01.02.01 Original release for v1.2 work. 130 * 08-08-01 01.02.01 Original release for v1.2 work.
128 * New format for FWVersion and ProductId in 131 * New format for FWVersion and ProductId in
129 * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER. 132 * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
130 * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and 133 * 08-31-01 01.02.02 Addded event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
131 * related structure and defines. 134 * related structure and defines.
132 * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED. 135 * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
133 * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE. 136 * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
@@ -187,7 +190,7 @@ mpi_ioc.h
187 * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED. 190 * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
188 * Added MaxInitiators field to PortFacts reply. 191 * Added MaxInitiators field to PortFacts reply.
189 * Added SAS Device Status Change ReasonCode for 192 * Added SAS Device Status Change ReasonCode for
190 * asynchronous notification. 193 * asynchronous notificaiton.
191 * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event 194 * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
192 * data structure. 195 * data structure.
193 * Added new ImageType values for FWDownload and FWUpload 196 * Added new ImageType values for FWDownload and FWUpload
@@ -199,6 +202,16 @@ mpi_ioc.h
199 * added _MULTI_PORT_DOMAIN. 202 * added _MULTI_PORT_DOMAIN.
200 * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request. 203 * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
201 * Added Common Boot Block type to FWUpload Request. 204 * Added Common Boot Block type to FWUpload Request.
205 * 08-07-07 01.05.15 Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
206 * Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
207 * MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
208 * Added SASAddress field to SAS Initiator Device Table
209 * Overflow event data structure.
210 * 03-28-08 01.05.16 Added two new ReasonCode values to SAS Device Status
211 * Change Event data to indicate completion of internally
212 * generated task management.
213 * Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
214 * Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
202 * -------------------------------------------------------------------------- 215 * --------------------------------------------------------------------------
203 216
204mpi_cnfg.h 217mpi_cnfg.h
@@ -213,7 +226,7 @@ mpi_cnfg.h
213 * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1 226 * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
214 * page and updated the page version. 227 * page and updated the page version.
215 * Added Information field and _INFO_PARAMS_NEGOTIATED 228 * Added Information field and _INFO_PARAMS_NEGOTIATED
216 * definition to SCSI_DEVICE_0 page. 229 * definitionto SCSI_DEVICE_0 page.
217 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the 230 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
218 * page version. 231 * page version.
219 * Added BucketsRemaining to LAN_1 page, redefined the 232 * Added BucketsRemaining to LAN_1 page, redefined the
@@ -496,6 +509,20 @@ mpi_cnfg.h
496 * Expander Page 0 Flags field. 509 * Expander Page 0 Flags field.
497 * Fixed define for 510 * Fixed define for
498 * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED. 511 * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
512 * 08-07-07 01.05.16 Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
513 * define.
514 * Added BIOS Page 4 structure.
515 * Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
516 * Physcial Disk Page 1.
517 * 01-15-07 01.05.17 Added additional bit defines for ExtFlags field of
518 * Manufacturing Page 4.
519 * Added Solid State Drives Supported bit to IOC Page 6
520 * Capabilities Flags.
521 * Added new value for AccessStatus field of SAS Device
522 * Page 0 (_SATA_NEEDS_INITIALIZATION).
523 * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field
524 * to control coercion size and the mixing of SAS and SATA
525 * SSD drives.
499 * -------------------------------------------------------------------------- 526 * --------------------------------------------------------------------------
500 527
501mpi_init.h 528mpi_init.h
@@ -661,6 +688,9 @@ mpi_raid.h
661 * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE. 688 * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
662 * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and 689 * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
663 * associated defines. 690 * associated defines.
691 * 08-07-07 01.05.04 Added Disable Full Rebuild bit to the ActionDataWord
692 * for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
693 * 01-15-08 01.05.05 Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
664 * -------------------------------------------------------------------------- 694 * --------------------------------------------------------------------------
665 695
666mpi_tool.h 696mpi_tool.h
@@ -694,6 +724,10 @@ mpi_sas.h
694 * reply. 724 * reply.
695 * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO 725 * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
696 * Unit Control request. 726 * Unit Control request.
727 * 01-15-08 01.05.05 Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
728 * including adding IOCParameter and IOCParameter value
729 * fields to SAS IO Unit Control Request.
730 * Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
697 * -------------------------------------------------------------------------- 731 * --------------------------------------------------------------------------
698 732
699mpi_type.h 733mpi_type.h
@@ -709,20 +743,20 @@ mpi_type.h
709 743
710mpi_history.txt Parts list history 744mpi_history.txt Parts list history
711 745
712Filename 01.05.15 01.05.15 746Filename 01.05.19 01.05.18 01.05.17 01.05.16 01.05.15
713---------- -------- -------- 747---------- -------- -------- -------- -------- --------
714mpi.h 01.05.12 01.05.13 748mpi.h 01.05.16 01.05.15 01.05.14 01.05.13 01.05.12
715mpi_ioc.h 01.05.13 01.05.14 749mpi_ioc.h 01.05.16 01.05.15 01.05.15 01.05.14 01.05.13
716mpi_cnfg.h 01.05.14 01.05.15 750mpi_cnfg.h 01.05.18 01.05.17 01.05.16 01.05.15 01.05.14
717mpi_init.h 01.05.09 01.05.09 751mpi_init.h 01.05.09 01.05.09 01.05.09 01.05.09 01.05.09
718mpi_targ.h 01.05.06 01.05.06 752mpi_targ.h 01.05.06 01.05.06 01.05.06 01.05.06 01.05.06
719mpi_fc.h 01.05.01 01.05.01 753mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
720mpi_lan.h 01.05.01 01.05.01 754mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
721mpi_raid.h 01.05.03 01.05.03 755mpi_raid.h 01.05.05 01.05.05 01.05.04 01.05.03 01.05.03
722mpi_tool.h 01.05.03 01.05.03 756mpi_tool.h 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03
723mpi_inb.h 01.05.01 01.05.01 757mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
724mpi_sas.h 01.05.04 01.05.04 758mpi_sas.h 01.05.05 01.05.05 01.05.04 01.05.04 01.05.04
725mpi_type.h 01.05.02 01.05.02 759mpi_type.h 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02
726 760
727Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09 761Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
728---------- -------- -------- -------- -------- -------- -------- 762---------- -------- -------- -------- -------- -------- --------
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
index a9e3693601a7..4295d062caa7 100644
--- a/drivers/message/fusion/lsi/mpi_init.h
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_init.h 5 * Name: mpi_init.h
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 5cbb6bd048e1..8faa4fab7b89 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2007 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_ioc.h 5 * Name: mpi_ioc.h
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: August 11, 2000 7 * Creation Date: August 11, 2000
8 * 8 *
9 * mpi_ioc.h Version: 01.05.14 9 * mpi_ioc.h Version: 01.05.16
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -113,6 +113,16 @@
113 * added _MULTI_PORT_DOMAIN. 113 * added _MULTI_PORT_DOMAIN.
114 * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request. 114 * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
115 * Added Common Boot Block type to FWUpload Request. 115 * Added Common Boot Block type to FWUpload Request.
116 * 08-07-07 01.05.15 Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
117 * Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
118 * MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
119 * Added SASAddress field to SAS Initiator Device Table
120 * Overflow event data structure.
121 * 03-28-08 01.05.16 Added two new ReasonCode values to SAS Device Status
122 * Change Event data to indicate completion of internally
123 * generated task management.
124 * Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
125 * Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
116 * -------------------------------------------------------------------------- 126 * --------------------------------------------------------------------------
117 */ 127 */
118 128
@@ -612,6 +622,8 @@ typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
612#define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) 622#define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
613#define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) 623#define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
614#define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) 624#define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
625#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET (0x0E)
626#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL (0x0F)
615 627
616 628
617/* SCSI Event data for Queue Full event */ 629/* SCSI Event data for Queue Full event */
@@ -708,6 +720,8 @@ typedef struct _MPI_EVENT_DATA_IR2
708#define MPI_EVENT_IR2_RC_PD_REMOVED (0x05) 720#define MPI_EVENT_IR2_RC_PD_REMOVED (0x05)
709#define MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED (0x06) 721#define MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED (0x06)
710#define MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR (0x07) 722#define MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR (0x07)
723#define MPI_EVENT_IR2_RC_DUAL_PORT_ADDED (0x08)
724#define MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED (0x09)
711 725
712/* defines for logical disk states */ 726/* defines for logical disk states */
713#define MPI_LD_STATE_OPTIMAL (0x00) 727#define MPI_LD_STATE_OPTIMAL (0x00)
@@ -867,6 +881,7 @@ typedef struct _EVENT_DATA_DISCOVERY_ERROR
867#define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE (0x00000800) 881#define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE (0x00000800)
868#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000) 882#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
869#define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN (0x00002000) 883#define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN (0x00002000)
884#define MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE (0x00004000)
870 885
871/* SAS SMP Error Event data */ 886/* SAS SMP Error Event data */
872 887
@@ -902,6 +917,8 @@ typedef struct _EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
902 917
903/* defines for the ReasonCode field of the SAS Initiator Device Status Change event */ 918/* defines for the ReasonCode field of the SAS Initiator Device Status Change event */
904#define MPI_EVENT_SAS_INIT_RC_ADDED (0x01) 919#define MPI_EVENT_SAS_INIT_RC_ADDED (0x01)
920#define MPI_EVENT_SAS_INIT_RC_REMOVED (0x02)
921#define MPI_EVENT_SAS_INIT_RC_INACCESSIBLE (0x03)
905 922
906/* SAS Initiator Device Table Overflow Event data */ 923/* SAS Initiator Device Table Overflow Event data */
907 924
@@ -910,6 +927,7 @@ typedef struct _EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
910 U8 MaxInit; /* 00h */ 927 U8 MaxInit; /* 00h */
911 U8 CurrentInit; /* 01h */ 928 U8 CurrentInit; /* 01h */
912 U16 Reserved1; /* 02h */ 929 U16 Reserved1; /* 02h */
930 U64 SASAddress; /* 04h */
913} EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, 931} EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
914 MPI_POINTER PTR_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, 932 MPI_POINTER PTR_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
915 MpiEventDataSasInitTableOverflow_t, 933 MpiEventDataSasInitTableOverflow_t,
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
index 03253b53b785..f41fcb69b359 100644
--- a/drivers/message/fusion/lsi/mpi_lan.h
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_lan.h 5 * Name: mpi_lan.h
diff --git a/drivers/message/fusion/lsi/mpi_log_fc.h b/drivers/message/fusion/lsi/mpi_log_fc.h
index e4dafcefeecd..face6e7acc72 100644
--- a/drivers/message/fusion/lsi/mpi_log_fc.h
+++ b/drivers/message/fusion/lsi/mpi_log_fc.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2001 LSI Corporation. All rights reserved. 2 * Copyright (c) 2000-2008 LSI Corporation. All rights reserved.
3 * 3 *
4 * NAME: fc_log.h 4 * NAME: fc_log.h
5 * SUMMARY: MPI IocLogInfo definitions for the SYMFC9xx chips 5 * SUMMARY: MPI IocLogInfo definitions for the SYMFC9xx chips
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index af9da03e95e5..691620dbedd2 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -1,6 +1,6 @@
1/*************************************************************************** 1/***************************************************************************
2 * * 2 * *
3 * Copyright 2003 LSI Corporation. All rights reserved. * 3 * Copyright (c) 2000-2008 LSI Corporation. All rights reserved. *
4 * * 4 * *
5 * Description * 5 * Description *
6 * ------------ * 6 * ------------ *
@@ -73,6 +73,8 @@
73#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004) 73#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004)
74#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005) 74#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005)
75 75
76#define IOP_LOGINFO_CODE_LOG_TIMESTAMP_EVENT (0x00080000)
77
76/****************************************************************************/ 78/****************************************************************************/
77/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */ 79/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */
78/****************************************************************************/ 80/****************************************************************************/
@@ -92,7 +94,7 @@
92#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP (0x0000000C) 94#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP (0x0000000C)
93#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D (0x0000000D) 95#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D (0x0000000D)
94#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL (0x0000000E) 96#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL (0x0000000E)
95#define PL_LOGINFO_SUB CODE_OPEN_FAIL_BAD_DEST (0x00000011) 97#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BAD_DEST (0x00000011)
96#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP (0x00000012) 98#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP (0x00000012)
97#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP (0x00000013) 99#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP (0x00000013)
98#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0 (0x00000014) 100#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0 (0x00000014)
@@ -159,10 +161,11 @@
159 161
160#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200) 162#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200)
161#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300) 163#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300)
162#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400) /* Bits 0-3 encode Transport Status Register (offset 0x08) */ 164#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400)
163 /* Bit 0 is Status Bit 0: FrameXferErr */ 165/* Bits 0-3 encode Transport Status Register (offset 0x08) */
164 /* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */ 166/* Bit 0 is Status Bit 0: FrameXferErr */
165 /* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */ 167/* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
 168/* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */
166 169
167#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500) 170#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500)
168#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600) 171#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600)
@@ -177,6 +180,11 @@
177#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01) 180#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01)
178#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00) 181#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00)
179#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000) 182#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000)
183#define PL_LOGINFO_SUB_CODE_BREAK_ON_SATA_CONNECTION (0x00002000)
184/* not currently used in mainline */
185#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK (0x00003000)
186#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK_AIP (0x00004000)
187#define PL_LOGINFO_SUB_CODE_BREAK_ON_INCOMPLETE_BREAK_RCVD (0x00005000)
180 188
181#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */ 189#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */
182#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occurred on SMP Read */ 190#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occurred on SMP Read */
@@ -243,6 +251,8 @@
243#define IR_LOGINFO_VOLUME_ACTIVATE_VOLUME_FAILED (0x00010014) 251#define IR_LOGINFO_VOLUME_ACTIVATE_VOLUME_FAILED (0x00010014)
244/* Activation failed trying to import the volume */ 252/* Activation failed trying to import the volume */
245#define IR_LOGINFO_VOLUME_ACTIVATING_IMPORT_VOLUME_FAILED (0x00010015) 253#define IR_LOGINFO_VOLUME_ACTIVATING_IMPORT_VOLUME_FAILED (0x00010015)
254/* Activation failed trying to import the volume */
255#define IR_LOGINFO_VOLUME_ACTIVATING_TOO_MANY_PHYS_DISKS (0x00010016)
246 256
247/* Phys Disk failed, too many phys disks */ 257/* Phys Disk failed, too many phys disks */
248#define IR_LOGINFO_PHYSDISK_CREATE_TOO_MANY_DISKS (0x00010020) 258#define IR_LOGINFO_PHYSDISK_CREATE_TOO_MANY_DISKS (0x00010020)
@@ -285,6 +295,21 @@
285/* Compatibility Error : IME size limited to < 2TB */ 295/* Compatibility Error : IME size limited to < 2TB */
286#define IR_LOGINFO_COMPAT_ERROR_IME_VOL_NOT_CURRENTLY_SUPPORTED (0x0001003D) 296#define IR_LOGINFO_COMPAT_ERROR_IME_VOL_NOT_CURRENTLY_SUPPORTED (0x0001003D)
287 297
298/* Device Firmware Update: DFU can only be started once */
299#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DFU_IN_PROGRESS (0x00010050)
300/* Device Firmware Update: Volume must be Optimal/Active/non-Quiesced */
301#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DEVICE_IN_INVALID_STATE (0x00010051)
302/* Device Firmware Update: DFU Timeout cannot be zero */
303#define IR_LOGINFO_DEV_FW_UPDATE_ERR_INVALID_TIMEOUT (0x00010052)
304/* Device Firmware Update: CREATE TIMER FAILED */
305#define IR_LOGINFO_DEV_FW_UPDATE_ERR_NO_TIMERS (0x00010053)
306/* Device Firmware Update: Failed to read SAS_IO_UNIT_PG_1 */
307#define IR_LOGINFO_DEV_FW_UPDATE_ERR_READING_CFG_PAGE (0x00010054)
308/* Device Firmware Update: Invalid SAS_IO_UNIT_PG_1 value(s) */
309#define IR_LOGINFO_DEV_FW_UPDATE_ERR_PORT_IO_TIMEOUTS_REQUIRED (0x00010055)
310/* Device Firmware Update: Unable to allocate memory for page */
311#define IR_LOGINFO_DEV_FW_UPDATE_ERR_ALLOC_CFG_PAGE (0x00010056)
312
288 313
289/****************************************************************************/ 314/****************************************************************************/
290/* Defines for convenience */ 315/* Defines for convenience */
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
index 2856108421d7..add60cc85be1 100644
--- a/drivers/message/fusion/lsi/mpi_raid.h
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2001-2007 LSI Corporation. 2 * Copyright (c) 2001-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_raid.h 5 * Name: mpi_raid.h
6 * Title: MPI RAID message and structures 6 * Title: MPI RAID message and structures
7 * Creation Date: February 27, 2001 7 * Creation Date: February 27, 2001
8 * 8 *
9 * mpi_raid.h Version: 01.05.03 9 * mpi_raid.h Version: 01.05.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -34,6 +34,9 @@
34 * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE. 34 * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
35 * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and 35 * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
36 * associated defines. 36 * associated defines.
37 * 08-07-07 01.05.04 Added Disable Full Rebuild bit to the ActionDataWord
38 * for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
39 * 01-15-08 01.05.05 Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
37 * -------------------------------------------------------------------------- 40 * --------------------------------------------------------------------------
38 */ 41 */
39 42
@@ -93,6 +96,7 @@ typedef struct _MSG_RAID_ACTION
93#define MPI_RAID_ACTION_SET_RESYNC_RATE (0x13) 96#define MPI_RAID_ACTION_SET_RESYNC_RATE (0x13)
94#define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE (0x14) 97#define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE (0x14)
95#define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) 98#define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
99#define MPI_RAID_ACTION_SET_VOLUME_NAME (0x16)
96 100
97/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */ 101/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
98#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001) 102#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001)
@@ -105,6 +109,9 @@ typedef struct _MSG_RAID_ACTION
105#define MPI_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) 109#define MPI_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
106#define MPI_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000002) 110#define MPI_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000002)
107 111
112/* ActionDataWord defines for use with MPI_RAID_ACTION_DISABLE_VOLUME action */
113#define MPI_RAID_ACTION_ADATA_DISABLE_FULL_REBUILD (0x00000001)
114
108/* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */ 115/* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */
109#define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL (0x00000001) 116#define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL (0x00000001)
110 117
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
index 33fca83cefc2..ab410036bbfc 100644
--- a/drivers/message/fusion/lsi/mpi_sas.h
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2004-2006 LSI Corporation. 2 * Copyright (c) 2004-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_sas.h 5 * Name: mpi_sas.h
6 * Title: MPI Serial Attached SCSI structures and definitions 6 * Title: MPI Serial Attached SCSI structures and definitions
7 * Creation Date: August 19, 2004 7 * Creation Date: August 19, 2004
8 * 8 *
9 * mpi_sas.h Version: 01.05.04 9 * mpi_sas.h Version: 01.05.05
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -23,6 +23,10 @@
23 * reply. 23 * reply.
24 * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO 24 * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
25 * Unit Control request. 25 * Unit Control request.
26 * 01-15-08 01.05.05 Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
27 * including adding IOCParameter and IOCParameter value
28 * fields to SAS IO Unit Control Request.
29 * Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
26 * -------------------------------------------------------------------------- 30 * --------------------------------------------------------------------------
27 */ 31 */
28 32
@@ -60,6 +64,8 @@
60 * Values for the SAS DeviceInfo field used in SAS Device Status Change Event 64 * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
61 * data and SAS IO Unit Configuration pages. 65 * data and SAS IO Unit Configuration pages.
62 */ 66 */
67#define MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC (0xF0000000)
68
63#define MPI_SAS_DEVICE_INFO_SEP (0x00004000) 69#define MPI_SAS_DEVICE_INFO_SEP (0x00004000)
64#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) 70#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
65#define MPI_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) 71#define MPI_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
@@ -216,7 +222,7 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
216 U8 ChainOffset; /* 02h */ 222 U8 ChainOffset; /* 02h */
217 U8 Function; /* 03h */ 223 U8 Function; /* 03h */
218 U16 DevHandle; /* 04h */ 224 U16 DevHandle; /* 04h */
219 U8 Reserved3; /* 06h */ 225 U8 IOCParameter; /* 06h */
220 U8 MsgFlags; /* 07h */ 226 U8 MsgFlags; /* 07h */
221 U32 MsgContext; /* 08h */ 227 U32 MsgContext; /* 08h */
222 U8 TargetID; /* 0Ch */ 228 U8 TargetID; /* 0Ch */
@@ -225,7 +231,7 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
225 U8 PrimFlags; /* 0Fh */ 231 U8 PrimFlags; /* 0Fh */
226 U32 Primitive; /* 10h */ 232 U32 Primitive; /* 10h */
227 U64 SASAddress; /* 14h */ 233 U64 SASAddress; /* 14h */
228 U32 Reserved4; /* 1Ch */ 234 U32 IOCParameterValue; /* 1Ch */
229} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST, 235} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST,
230 SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t; 236 SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t;
231 237
@@ -241,6 +247,8 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
241#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) 247#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
242#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */ 248#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */
243#define MPI_SAS_OP_REMOVE_DEVICE (0x0D) 249#define MPI_SAS_OP_REMOVE_DEVICE (0x0D)
250#define MPI_SAS_OP_SET_IOC_PARAMETER (0x0E)
251#define MPI_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
244 252
245/* values for the PrimFlags field */ 253/* values for the PrimFlags field */
246#define MPI_SAS_PRIMFLAGS_SINGLE (0x08) 254#define MPI_SAS_PRIMFLAGS_SINGLE (0x08)
@@ -256,7 +264,7 @@ typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY
256 U8 MsgLength; /* 02h */ 264 U8 MsgLength; /* 02h */
257 U8 Function; /* 03h */ 265 U8 Function; /* 03h */
258 U16 DevHandle; /* 04h */ 266 U16 DevHandle; /* 04h */
259 U8 Reserved3; /* 06h */ 267 U8 IOCParameter; /* 06h */
260 U8 MsgFlags; /* 07h */ 268 U8 MsgFlags; /* 07h */
261 U32 MsgContext; /* 08h */ 269 U32 MsgContext; /* 08h */
262 U16 Reserved4; /* 0Ch */ 270 U16 Reserved4; /* 0Ch */
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
index ff8c37d3fdcb..c3dea7f6909d 100644
--- a/drivers/message/fusion/lsi/mpi_targ.h
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_targ.h 5 * Name: mpi_targ.h
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
index 8834ae6ce0f2..53cd715aa7e4 100644
--- a/drivers/message/fusion/lsi/mpi_tool.h
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2001-2005 LSI Corporation. 2 * Copyright (c) 2001-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_tool.h 5 * Name: mpi_tool.h
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
index 08dad9c1e446..888b26dbc413 100644
--- a/drivers/message/fusion/lsi/mpi_type.h
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2004 LSI Corporation. 2 * Copyright (c) 2000-2008 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi_type.h 5 * Name: mpi_type.h
6 * Title: MPI Basic type definitions 6 * Title: MPI Basic type definitions
7 * Creation Date: June 6, 2000 7 * Creation Date: June 6, 2000
8 * 8 *
9 * mpi_type.h Version: 01.05.01 9 * mpi_type.h Version: 01.05.02
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index c4e8b9aa3827..96ac88317b8e 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -79,9 +79,22 @@ MODULE_VERSION(my_VERSION);
79/* 79/*
80 * cmd line parameters 80 * cmd line parameters
81 */ 81 */
82static int mpt_msi_enable = -1; 82
83module_param(mpt_msi_enable, int, 0); 83static int mpt_msi_enable_spi;
84MODULE_PARM_DESC(mpt_msi_enable, " MSI Support Enable (default=0)"); 84module_param(mpt_msi_enable_spi, int, 0);
85MODULE_PARM_DESC(mpt_msi_enable_spi, " Enable MSI Support for SPI \
86 controllers (default=0)");
87
88static int mpt_msi_enable_fc;
89module_param(mpt_msi_enable_fc, int, 0);
90MODULE_PARM_DESC(mpt_msi_enable_fc, " Enable MSI Support for FC \
91 controllers (default=0)");
92
 93static int mpt_msi_enable_sas = 1;
 94module_param(mpt_msi_enable_sas, int, 0);
95MODULE_PARM_DESC(mpt_msi_enable_sas, " Enable MSI Support for SAS \
96 controllers (default=1)");
97
85 98
86static int mpt_channel_mapping; 99static int mpt_channel_mapping;
87module_param(mpt_channel_mapping, int, 0); 100module_param(mpt_channel_mapping, int, 0);
@@ -91,7 +104,17 @@ static int mpt_debug_level;
91static int mpt_set_debug_level(const char *val, struct kernel_param *kp); 104static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
92module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int, 105module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
93 &mpt_debug_level, 0600); 106 &mpt_debug_level, 0600);
94MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h - (default=0)"); 107MODULE_PARM_DESC(mpt_debug_level, " debug level - refer to mptdebug.h \
108 - (default=0)");
109
110int mpt_fwfault_debug;
111EXPORT_SYMBOL(mpt_fwfault_debug);
112module_param_call(mpt_fwfault_debug, param_set_int, param_get_int,
113 &mpt_fwfault_debug, 0600);
114MODULE_PARM_DESC(mpt_fwfault_debug, "Enable detection of Firmware fault"
115 " and halt Firmware on fault - (default=0)");
116
117
95 118
96#ifdef MFCNT 119#ifdef MFCNT
97static int mfcounter = 0; 120static int mfcounter = 0;
@@ -1751,16 +1774,25 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1751 ioc->bus_type = SAS; 1774 ioc->bus_type = SAS;
1752 } 1775 }
1753 1776
1754 if (mpt_msi_enable == -1) {
1755 /* Enable on SAS, disable on FC and SPI */
1756 if (ioc->bus_type == SAS)
1757 ioc->msi_enable = 1;
1758 else
1759 ioc->msi_enable = 0;
1760 } else
1761 /* follow flag: 0 - disable; 1 - enable */
1762 ioc->msi_enable = mpt_msi_enable;
1763 1777
1778 switch (ioc->bus_type) {
1779
1780 case SAS:
1781 ioc->msi_enable = mpt_msi_enable_sas;
1782 break;
1783
1784 case SPI:
1785 ioc->msi_enable = mpt_msi_enable_spi;
1786 break;
1787
1788 case FC:
1789 ioc->msi_enable = mpt_msi_enable_fc;
1790 break;
1791
1792 default:
1793 ioc->msi_enable = 0;
1794 break;
1795 }
1764 if (ioc->errata_flag_1064) 1796 if (ioc->errata_flag_1064)
1765 pci_disable_io_access(pdev); 1797 pci_disable_io_access(pdev);
1766 1798
@@ -6313,6 +6345,33 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
6313 *size = y; 6345 *size = y;
6314} 6346}
6315 6347
6348
6349/**
 6350 * mpt_halt_firmware - Halts the firmware if it is operational and panics
6351 * the kernel
6352 * @ioc: Pointer to MPT_ADAPTER structure
6353 *
6354 **/
6355void
6356mpt_halt_firmware(MPT_ADAPTER *ioc)
6357{
6358 u32 ioc_raw_state;
6359
6360 ioc_raw_state = mpt_GetIocState(ioc, 0);
6361
6362 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
6363 printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n",
6364 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
6365 panic("%s: IOC Fault (%04xh)!!!\n", ioc->name,
6366 ioc_raw_state & MPI_DOORBELL_DATA_MASK);
6367 } else {
6368 CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00);
6369 panic("%s: Firmware is halted due to command timeout\n",
6370 ioc->name);
6371 }
6372}
6373EXPORT_SYMBOL(mpt_halt_firmware);
6374
6316/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 6375/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
6317/* 6376/*
6318 * Reset Handling 6377 * Reset Handling
@@ -6345,6 +6404,8 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
6345 printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name); 6404 printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
6346 printk("MF count 0x%x !\n", ioc->mfcnt); 6405 printk("MF count 0x%x !\n", ioc->mfcnt);
6347#endif 6406#endif
6407 if (mpt_fwfault_debug)
6408 mpt_halt_firmware(ioc);
6348 6409
6349 /* Reset the adapter. Prevent more than 1 call to 6410 /* Reset the adapter. Prevent more than 1 call to
6350 * mpt_do_ioc_recovery at any instant in time. 6411 * mpt_do_ioc_recovery at any instant in time.
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index dff048cfa101..b3e981d2a506 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -922,11 +922,14 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
922extern int mpt_findImVolumes(MPT_ADAPTER *ioc); 922extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); 923extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); 924extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
925extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
926
925 927
926/* 928/*
927 * Public data decl's... 929 * Public data decl's...
928 */ 930 */
929extern struct list_head ioc_list; 931extern struct list_head ioc_list;
932extern int mpt_fwfault_debug;
930 933
931/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ 934/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
932#endif /* } __KERNEL__ */ 935#endif /* } __KERNEL__ */
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index ee090413e598..e62c6bc4ad33 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1846,6 +1846,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1846 if (hd->timeouts < -1) 1846 if (hd->timeouts < -1)
1847 hd->timeouts++; 1847 hd->timeouts++;
1848 1848
1849 if (mpt_fwfault_debug)
1850 mpt_halt_firmware(ioc);
1851
1849 /* Most important! Set TaskMsgContext to SCpnt's MsgContext! 1852 /* Most important! Set TaskMsgContext to SCpnt's MsgContext!
1850 * (the IO to be ABORT'd) 1853 * (the IO to be ABORT'd)
1851 * 1854 *
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 419c378bd24b..56073199ceba 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -87,14 +87,6 @@ config PHANTOM
87 If you choose to build module, its name will be phantom. If unsure, 87 If you choose to build module, its name will be phantom. If unsure,
88 say N here. 88 say N here.
89 89
90config EEPROM_93CX6
91 tristate "EEPROM 93CX6 support"
92 ---help---
93 This is a driver for the EEPROM chipsets 93c46 and 93c66.
94 The driver supports both read as well as write commands.
95
96 If unsure, say N.
97
98config SGI_IOC4 90config SGI_IOC4
99 tristate "SGI IOC4 Base IO support" 91 tristate "SGI IOC4 Base IO support"
100 depends on PCI 92 depends on PCI
@@ -231,5 +223,6 @@ config DELL_LAPTOP
231 laptops. 223 laptops.
232 224
233source "drivers/misc/c2port/Kconfig" 225source "drivers/misc/c2port/Kconfig"
226source "drivers/misc/eeprom/Kconfig"
234 227
235endif # MISC_DEVICES 228endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 9cf8ae6e4b39..bc1199830554 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -10,14 +10,13 @@ obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
10obj-$(CONFIG_ICS932S401) += ics932s401.o 10obj-$(CONFIG_ICS932S401) += ics932s401.o
11obj-$(CONFIG_LKDTM) += lkdtm.o 11obj-$(CONFIG_LKDTM) += lkdtm.o
12obj-$(CONFIG_TIFM_CORE) += tifm_core.o 12obj-$(CONFIG_TIFM_CORE) += tifm_core.o
13obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
14obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o 13obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
15obj-$(CONFIG_PHANTOM) += phantom.o 14obj-$(CONFIG_PHANTOM) += phantom.o
16obj-$(CONFIG_SGI_IOC4) += ioc4.o 15obj-$(CONFIG_SGI_IOC4) += ioc4.o
17obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
18obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o 16obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
19obj-$(CONFIG_KGDB_TESTS) += kgdbts.o 17obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
20obj-$(CONFIG_SGI_XP) += sgi-xp/ 18obj-$(CONFIG_SGI_XP) += sgi-xp/
21obj-$(CONFIG_SGI_GRU) += sgi-gru/ 19obj-$(CONFIG_SGI_GRU) += sgi-gru/
22obj-$(CONFIG_HP_ILO) += hpilo.o 20obj-$(CONFIG_HP_ILO) += hpilo.o
23obj-$(CONFIG_C2PORT) += c2port/ 21obj-$(CONFIG_C2PORT) += c2port/
22obj-y += eeprom/
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
new file mode 100644
index 000000000000..c76df8cda5ef
--- /dev/null
+++ b/drivers/misc/eeprom/Kconfig
@@ -0,0 +1,59 @@
1menu "EEPROM support"
2
3config EEPROM_AT24
4 tristate "I2C EEPROMs from most vendors"
5 depends on I2C && SYSFS && EXPERIMENTAL
6 help
7 Enable this driver to get read/write support to most I2C EEPROMs,
8 after you configure the driver to know about each EEPROM on
9 your target board. Use these generic chip names, instead of
10 vendor-specific ones like at24c64 or 24lc02:
11
12 24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
13 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
14
15 Unless you like data loss puzzles, always be sure that any chip
16 you configure as a 24c32 (32 kbit) or larger is NOT really a
17 24c16 (16 kbit) or smaller, and vice versa. Marking the chip
18 as read-only won't help recover from this. Also, if your chip
19 has any software write-protect mechanism you may want to review the
20 code to make sure this driver won't turn it on by accident.
21
22 If you use this with an SMBus adapter instead of an I2C adapter,
23 full functionality is not available. Only smaller devices are
24 supported (24c16 and below, max 4 kByte).
25
26 This driver can also be built as a module. If so, the module
27 will be called at24.
28
29config EEPROM_AT25
30 tristate "SPI EEPROMs from most vendors"
31 depends on SPI && SYSFS
32 help
33 Enable this driver to get read/write support to most SPI EEPROMs,
34 after you configure the board init code to know about each eeprom
35 on your target board.
36
37 This driver can also be built as a module. If so, the module
38 will be called at25.
39
40config EEPROM_LEGACY
41 tristate "Old I2C EEPROM reader"
42 depends on I2C && SYSFS
43 help
44 If you say yes here you get read-only access to the EEPROM data
45 available on modern memory DIMMs and Sony Vaio laptops via I2C. Such
46 EEPROMs could theoretically be available on other devices as well.
47
48 This driver can also be built as a module. If so, the module
49 will be called eeprom.
50
51config EEPROM_93CX6
52 tristate "EEPROM 93CX6 support"
53 help
54 This is a driver for the EEPROM chipsets 93c46 and 93c66.
55 The driver supports both read as well as write commands.
56
57 If unsure, say N.
58
59endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
new file mode 100644
index 000000000000..539dd8f88128
--- /dev/null
+++ b/drivers/misc/eeprom/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_EEPROM_AT24) += at24.o
2obj-$(CONFIG_EEPROM_AT25) += at25.o
3obj-$(CONFIG_EEPROM_LEGACY) += eeprom.o
4obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
diff --git a/drivers/i2c/chips/at24.c b/drivers/misc/eeprom/at24.c
index d4775528abc6..d4775528abc6 100644
--- a/drivers/i2c/chips/at24.c
+++ b/drivers/misc/eeprom/at24.c
diff --git a/drivers/spi/at25.c b/drivers/misc/eeprom/at25.c
index 290dbe99647a..290dbe99647a 100644
--- a/drivers/spi/at25.c
+++ b/drivers/misc/eeprom/at25.c
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/misc/eeprom/eeprom.c
index 2c27193aeaa0..2c27193aeaa0 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 15b1780025c8..15b1780025c8 100644
--- a/drivers/misc/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 73b7fb8de47a..82fb9958f22f 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -899,7 +899,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
899 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", 899 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
900 part_sn2->remote_vars_pa); 900 part_sn2->remote_vars_pa);
901 901
902 part->last_heartbeat = remote_vars->heartbeat; 902 part->last_heartbeat = remote_vars->heartbeat - 1;
903 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", 903 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
904 part->last_heartbeat); 904 part->last_heartbeat);
905 905
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 745ac188babe..d15d8b79d8e5 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -646,7 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
646 .ndo_get_stats = ei_get_stats, 646 .ndo_get_stats = ei_get_stats,
647 .ndo_set_multicast_list = ei_set_multicast_list, 647 .ndo_set_multicast_list = ei_set_multicast_list,
648 .ndo_validate_addr = eth_validate_addr, 648 .ndo_validate_addr = eth_validate_addr,
649 .ndo_set_mac_addr = eth_set_mac_addr, 649 .ndo_set_mac_address = eth_set_mac_addr,
650 .ndo_change_mtu = eth_change_mtu, 650 .ndo_change_mtu = eth_change_mtu,
651#ifdef CONFIG_NET_POLL_CONTROLLER 651#ifdef CONFIG_NET_POLL_CONTROLLER
652 .ndo_poll_controller = ei_poll, 652 .ndo_poll_controller = ei_poll,
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 337488ec707c..a4eb6c40678c 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -37,7 +37,10 @@ static int phy_debug = 0;
37#define __ei_open ax_ei_open 37#define __ei_open ax_ei_open
38#define __ei_close ax_ei_close 38#define __ei_close ax_ei_close
39#define __ei_poll ax_ei_poll 39#define __ei_poll ax_ei_poll
40#define __ei_start_xmit ax_ei_start_xmit
40#define __ei_tx_timeout ax_ei_tx_timeout 41#define __ei_tx_timeout ax_ei_tx_timeout
42#define __ei_get_stats ax_ei_get_stats
43#define __ei_set_multicast_list ax_ei_set_multicast_list
41#define __ei_interrupt ax_ei_interrupt 44#define __ei_interrupt ax_ei_interrupt
42#define ____alloc_ei_netdev ax__alloc_ei_netdev 45#define ____alloc_ei_netdev ax__alloc_ei_netdev
43#define __NS8390_init ax_NS8390_init 46#define __NS8390_init ax_NS8390_init
@@ -623,6 +626,23 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom)
623} 626}
624#endif 627#endif
625 628
629static const struct net_device_ops ax_netdev_ops = {
630 .ndo_open = ax_open,
631 .ndo_stop = ax_close,
632 .ndo_do_ioctl = ax_ioctl,
633
634 .ndo_start_xmit = ax_ei_start_xmit,
635 .ndo_tx_timeout = ax_ei_tx_timeout,
636 .ndo_get_stats = ax_ei_get_stats,
637 .ndo_set_multicast_list = ax_ei_set_multicast_list,
638 .ndo_validate_addr = eth_validate_addr,
639 .ndo_set_mac_address = eth_mac_addr,
640 .ndo_change_mtu = eth_change_mtu,
641#ifdef CONFIG_NET_POLL_CONTROLLER
642 .ndo_poll_controller = ax_ei_poll,
643#endif
644};
645
626/* setup code */ 646/* setup code */
627 647
628static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) 648static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
@@ -738,9 +758,7 @@ static int ax_init_dev(struct net_device *dev, int first_init)
738 ei_status.get_8390_hdr = &ax_get_8390_hdr; 758 ei_status.get_8390_hdr = &ax_get_8390_hdr;
739 ei_status.priv = 0; 759 ei_status.priv = 0;
740 760
741 dev->open = ax_open; 761 dev->netdev_ops = &ax_netdev_ops;
742 dev->stop = ax_close;
743 dev->do_ioctl = ax_ioctl;
744 dev->ethtool_ops = &ax_ethtool_ops; 762 dev->ethtool_ops = &ax_ethtool_ops;
745 763
746 ax->msg_enable = NETIF_MSG_LINK; 764 ax->msg_enable = NETIF_MSG_LINK;
@@ -753,9 +771,6 @@ static int ax_init_dev(struct net_device *dev, int first_init)
753 ax->mii.mdio_write = ax_phy_write; 771 ax->mii.mdio_write = ax_phy_write;
754 ax->mii.dev = dev; 772 ax->mii.dev = dev;
755 773
756#ifdef CONFIG_NET_POLL_CONTROLLER
757 dev->poll_controller = ax_ei_poll;
758#endif
759 ax_NS8390_init(dev, 0); 774 ax_NS8390_init(dev, 0);
760 775
761 if (first_init) 776 if (first_init)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 5ae131c147f9..c38512ebcea6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -679,6 +679,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
679 dev_kfree_skb_any(skb); 679 dev_kfree_skb_any(skb);
680 return -ENOMEM; 680 return -ENOMEM;
681 } 681 }
682 bp->force_copybreak = 1;
682 } 683 }
683 684
684 rh = (struct rx_header *) skb->data; 685 rh = (struct rx_header *) skb->data;
@@ -800,7 +801,7 @@ static int b44_rx(struct b44 *bp, int budget)
800 /* Omit CRC. */ 801 /* Omit CRC. */
801 len -= 4; 802 len -= 4;
802 803
803 if (len > RX_COPY_THRESHOLD) { 804 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
804 int skb_size; 805 int skb_size;
805 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod); 806 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
806 if (skb_size < 0) 807 if (skb_size < 0)
@@ -2152,6 +2153,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2152 bp = netdev_priv(dev); 2153 bp = netdev_priv(dev);
2153 bp->sdev = sdev; 2154 bp->sdev = sdev;
2154 bp->dev = dev; 2155 bp->dev = dev;
2156 bp->force_copybreak = 0;
2155 2157
2156 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2158 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2157 2159
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 7db0c84a7950..e678498de6db 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -395,7 +395,7 @@ struct b44 {
395 u32 rx_pending; 395 u32 rx_pending;
396 u32 tx_pending; 396 u32 tx_pending;
397 u8 phy_addr; 397 u8 phy_addr;
398 398 u8 force_copybreak;
399 struct mii_if_info mii_if; 399 struct mii_if_info mii_if;
400}; 400};
401 401
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index fd705d1295a7..15a5cf0f676b 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
1/* bnx2x.h: Broadcom Everest network driver. 1/* bnx2x.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -20,6 +20,11 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
24#define BCM_VLAN 1
25#endif
26
27
23/* error/debug prints */ 28/* error/debug prints */
24 29
25#define DRV_MODULE_NAME "bnx2x" 30#define DRV_MODULE_NAME "bnx2x"
@@ -78,11 +83,6 @@
78#endif 83#endif
79 84
80 85
81#ifdef NETIF_F_HW_VLAN_TX
82#define BCM_VLAN 1
83#endif
84
85
86#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) 86#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
87#define U64_HI(x) (u32)(((u64)(x)) >> 32) 87#define U64_HI(x) (u32)(((u64)(x)) >> 32)
88#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) 88#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -150,6 +150,9 @@ struct sw_rx_page {
150 150
151#define PAGES_PER_SGE_SHIFT 0 151#define PAGES_PER_SGE_SHIFT 0
152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT) 152#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
153#define SGE_PAGE_SIZE PAGE_SIZE
154#define SGE_PAGE_SHIFT PAGE_SHIFT
155#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN(addr)
153 156
154#define BCM_RX_ETH_PAYLOAD_ALIGN 64 157#define BCM_RX_ETH_PAYLOAD_ALIGN 64
155 158
@@ -268,14 +271,7 @@ struct bnx2x_fastpath {
268 271
269#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 272#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
270 273
271#define BNX2X_HAS_TX_WORK(fp) \ 274#define BNX2X_HAS_WORK(fp) (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))
272 ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) || \
273 (fp->tx_pkt_prod != fp->tx_pkt_cons))
274
275#define BNX2X_HAS_RX_WORK(fp) \
276 (fp->rx_comp_cons != rx_cons_sb)
277
278#define BNX2X_HAS_WORK(fp) (BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
279 275
280 276
281/* MC hsi */ 277/* MC hsi */
@@ -736,7 +732,7 @@ struct bnx2x {
736 struct bnx2x_fastpath fp[MAX_CONTEXT]; 732 struct bnx2x_fastpath fp[MAX_CONTEXT];
737 void __iomem *regview; 733 void __iomem *regview;
738 void __iomem *doorbells; 734 void __iomem *doorbells;
739#define BNX2X_DB_SIZE (16*2048) 735#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
740 736
741 struct net_device *dev; 737 struct net_device *dev;
742 struct pci_dev *pdev; 738 struct pci_dev *pdev;
@@ -801,6 +797,8 @@ struct bnx2x {
801#define TPA_ENABLE_FLAG 0x80 797#define TPA_ENABLE_FLAG 0x80
802#define NO_MCP_FLAG 0x100 798#define NO_MCP_FLAG 0x100
803#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 799#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
800#define HW_VLAN_TX_FLAG 0x400
801#define HW_VLAN_RX_FLAG 0x800
804 802
805 int func; 803 int func;
806#define BP_PORT(bp) (bp->func % PORT_MAX) 804#define BP_PORT(bp) (bp->func % PORT_MAX)
@@ -811,7 +809,7 @@ struct bnx2x {
811 int pm_cap; 809 int pm_cap;
812 int pcie_cap; 810 int pcie_cap;
813 811
814 struct work_struct sp_task; 812 struct delayed_work sp_task;
815 struct work_struct reset_task; 813 struct work_struct reset_task;
816 814
817 struct timer_list timer; 815 struct timer_list timer;
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index fefa6ab13064..aea26b4dc453 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1,4 +1,4 @@
1/* Copyright 2008 Broadcom Corporation 1/* Copyright 2008-2009 Broadcom Corporation
2 * 2 *
3 * Unless you and Broadcom execute a separate written software license 3 * Unless you and Broadcom execute a separate written software license
4 * agreement governing use of this software, this software is licensed to you 4 * agreement governing use of this software, this software is licensed to you
@@ -317,6 +317,9 @@ static u8 bnx2x_emac_enable(struct link_params *params,
317 val &= ~0x810; 317 val &= ~0x810;
318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); 318 EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
319 319
320 /* enable emac */
321 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
322
320 /* enable emac for jumbo packets */ 323 /* enable emac for jumbo packets */
321 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, 324 EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
322 (EMAC_RX_MTU_SIZE_JUMBO_ENA | 325 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
@@ -1609,7 +1612,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1609 u32 gp_status) 1612 u32 gp_status)
1610{ 1613{
1611 struct bnx2x *bp = params->bp; 1614 struct bnx2x *bp = params->bp;
1612 1615 u16 new_line_speed;
1613 u8 rc = 0; 1616 u8 rc = 0;
1614 vars->link_status = 0; 1617 vars->link_status = 0;
1615 1618
@@ -1629,7 +1632,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1629 1632
1630 switch (gp_status & GP_STATUS_SPEED_MASK) { 1633 switch (gp_status & GP_STATUS_SPEED_MASK) {
1631 case GP_STATUS_10M: 1634 case GP_STATUS_10M:
1632 vars->line_speed = SPEED_10; 1635 new_line_speed = SPEED_10;
1633 if (vars->duplex == DUPLEX_FULL) 1636 if (vars->duplex == DUPLEX_FULL)
1634 vars->link_status |= LINK_10TFD; 1637 vars->link_status |= LINK_10TFD;
1635 else 1638 else
@@ -1637,7 +1640,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1637 break; 1640 break;
1638 1641
1639 case GP_STATUS_100M: 1642 case GP_STATUS_100M:
1640 vars->line_speed = SPEED_100; 1643 new_line_speed = SPEED_100;
1641 if (vars->duplex == DUPLEX_FULL) 1644 if (vars->duplex == DUPLEX_FULL)
1642 vars->link_status |= LINK_100TXFD; 1645 vars->link_status |= LINK_100TXFD;
1643 else 1646 else
@@ -1646,7 +1649,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1646 1649
1647 case GP_STATUS_1G: 1650 case GP_STATUS_1G:
1648 case GP_STATUS_1G_KX: 1651 case GP_STATUS_1G_KX:
1649 vars->line_speed = SPEED_1000; 1652 new_line_speed = SPEED_1000;
1650 if (vars->duplex == DUPLEX_FULL) 1653 if (vars->duplex == DUPLEX_FULL)
1651 vars->link_status |= LINK_1000TFD; 1654 vars->link_status |= LINK_1000TFD;
1652 else 1655 else
@@ -1654,7 +1657,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1654 break; 1657 break;
1655 1658
1656 case GP_STATUS_2_5G: 1659 case GP_STATUS_2_5G:
1657 vars->line_speed = SPEED_2500; 1660 new_line_speed = SPEED_2500;
1658 if (vars->duplex == DUPLEX_FULL) 1661 if (vars->duplex == DUPLEX_FULL)
1659 vars->link_status |= LINK_2500TFD; 1662 vars->link_status |= LINK_2500TFD;
1660 else 1663 else
@@ -1671,32 +1674,32 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1671 case GP_STATUS_10G_KX4: 1674 case GP_STATUS_10G_KX4:
1672 case GP_STATUS_10G_HIG: 1675 case GP_STATUS_10G_HIG:
1673 case GP_STATUS_10G_CX4: 1676 case GP_STATUS_10G_CX4:
1674 vars->line_speed = SPEED_10000; 1677 new_line_speed = SPEED_10000;
1675 vars->link_status |= LINK_10GTFD; 1678 vars->link_status |= LINK_10GTFD;
1676 break; 1679 break;
1677 1680
1678 case GP_STATUS_12G_HIG: 1681 case GP_STATUS_12G_HIG:
1679 vars->line_speed = SPEED_12000; 1682 new_line_speed = SPEED_12000;
1680 vars->link_status |= LINK_12GTFD; 1683 vars->link_status |= LINK_12GTFD;
1681 break; 1684 break;
1682 1685
1683 case GP_STATUS_12_5G: 1686 case GP_STATUS_12_5G:
1684 vars->line_speed = SPEED_12500; 1687 new_line_speed = SPEED_12500;
1685 vars->link_status |= LINK_12_5GTFD; 1688 vars->link_status |= LINK_12_5GTFD;
1686 break; 1689 break;
1687 1690
1688 case GP_STATUS_13G: 1691 case GP_STATUS_13G:
1689 vars->line_speed = SPEED_13000; 1692 new_line_speed = SPEED_13000;
1690 vars->link_status |= LINK_13GTFD; 1693 vars->link_status |= LINK_13GTFD;
1691 break; 1694 break;
1692 1695
1693 case GP_STATUS_15G: 1696 case GP_STATUS_15G:
1694 vars->line_speed = SPEED_15000; 1697 new_line_speed = SPEED_15000;
1695 vars->link_status |= LINK_15GTFD; 1698 vars->link_status |= LINK_15GTFD;
1696 break; 1699 break;
1697 1700
1698 case GP_STATUS_16G: 1701 case GP_STATUS_16G:
1699 vars->line_speed = SPEED_16000; 1702 new_line_speed = SPEED_16000;
1700 vars->link_status |= LINK_16GTFD; 1703 vars->link_status |= LINK_16GTFD;
1701 break; 1704 break;
1702 1705
@@ -1708,6 +1711,15 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1708 break; 1711 break;
1709 } 1712 }
1710 1713
 1714 /* Upon link speed change set the NIG into drain mode. This deals
 1715 with a possible FIFO glitch due to a clock change when speed is
 1716 decreased without a link down indicator */
1717 if (new_line_speed != vars->line_speed) {
1718 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
1719 + params->port*4, 0);
1720 msleep(1);
1721 }
1722 vars->line_speed = new_line_speed;
1711 vars->link_status |= LINK_STATUS_SERDES_LINK; 1723 vars->link_status |= LINK_STATUS_SERDES_LINK;
1712 1724
1713 if ((params->req_line_speed == SPEED_AUTO_NEG) && 1725 if ((params->req_line_speed == SPEED_AUTO_NEG) &&
@@ -3571,7 +3583,7 @@ static void bnx2x_set_xgxs_loopback(struct link_params *params,
3571 (MDIO_REG_BANK_CL73_IEEEB0 + 3583 (MDIO_REG_BANK_CL73_IEEEB0 +
3572 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 3584 (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
3573 0x6041); 3585 0x6041);
3574 3586 msleep(200);
3575 /* set aer mmd back */ 3587 /* set aer mmd back */
3576 bnx2x_set_aer_mmd(params, vars); 3588 bnx2x_set_aer_mmd(params, vars);
3577 3589
@@ -3870,9 +3882,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
3870 } 3882 }
3871 3883
3872 if (vars->phy_flags & PHY_XGXS_FLAG) { 3884 if (vars->phy_flags & PHY_XGXS_FLAG) {
3873 if (params->req_line_speed && 3885 if ((params->req_line_speed &&
3874 ((params->req_line_speed == SPEED_100) || 3886 ((params->req_line_speed == SPEED_100) ||
3875 (params->req_line_speed == SPEED_10))) { 3887 (params->req_line_speed == SPEED_10))) ||
3888 (!params->req_line_speed &&
3889 (params->speed_cap_mask >=
3890 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
3891 (params->speed_cap_mask <
3892 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
3893 )) {
3876 vars->phy_flags |= PHY_SGMII_FLAG; 3894 vars->phy_flags |= PHY_SGMII_FLAG;
3877 } else { 3895 } else {
3878 vars->phy_flags &= ~PHY_SGMII_FLAG; 3896 vars->phy_flags &= ~PHY_SGMII_FLAG;
@@ -4194,6 +4212,11 @@ static u8 bnx2x_update_link_down(struct link_params *params,
4194 /* activate nig drain */ 4212 /* activate nig drain */
4195 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); 4213 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
4196 4214
4215 /* disable emac */
4216 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
4217
4218 msleep(10);
4219
4197 /* reset BigMac */ 4220 /* reset BigMac */
4198 bnx2x_bmac_rx_disable(bp, params->port); 4221 bnx2x_bmac_rx_disable(bp, params->port);
4199 REG_WR(bp, GRCBASE_MISC + 4222 REG_WR(bp, GRCBASE_MISC +
@@ -4238,6 +4261,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
4238 4261
4239 /* update shared memory */ 4262 /* update shared memory */
4240 bnx2x_update_mng(params, vars->link_status); 4263 bnx2x_update_mng(params, vars->link_status);
4264 msleep(20);
4241 return rc; 4265 return rc;
4242} 4266}
4243/* This function should called upon link interrupt */ 4267/* This function should called upon link interrupt */
@@ -4276,6 +4300,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
4276 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), 4300 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
4277 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); 4301 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
4278 4302
4303 /* disable emac */
4304 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
4305
4279 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config); 4306 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
4280 4307
4281 /* Check external link change only for non-direct */ 4308 /* Check external link change only for non-direct */
@@ -4377,10 +4404,11 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, u32 shmem_base)
4377 ext_phy_addr[port], 4404 ext_phy_addr[port],
4378 MDIO_PMA_DEVAD, 4405 MDIO_PMA_DEVAD,
4379 MDIO_PMA_REG_ROM_VER1, &fw_ver1); 4406 MDIO_PMA_REG_ROM_VER1, &fw_ver1);
4380 if (fw_ver1 == 0) { 4407 if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
4381 DP(NETIF_MSG_LINK, 4408 DP(NETIF_MSG_LINK,
4382 "bnx2x_8073_common_init_phy port %x " 4409 "bnx2x_8073_common_init_phy port %x:"
4383 "fw Download failed\n", port); 4410 "Download failed. fw version = 0x%x\n",
4411 port, fw_ver1);
4384 return -EINVAL; 4412 return -EINVAL;
4385 } 4413 }
4386 4414
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 4be05847f86f..d3e7775a9ccf 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
1/* bnx2x_main.c: Broadcom Everest network driver. 1/* bnx2x_main.c: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -38,9 +38,7 @@
38#include <linux/time.h> 38#include <linux/time.h>
39#include <linux/ethtool.h> 39#include <linux/ethtool.h>
40#include <linux/mii.h> 40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX 41#include <linux/if_vlan.h>
42 #include <linux/if_vlan.h>
43#endif
44#include <net/ip.h> 42#include <net/ip.h>
45#include <net/tcp.h> 43#include <net/tcp.h>
46#include <net/checksum.h> 44#include <net/checksum.h>
@@ -59,8 +57,8 @@
59#include "bnx2x.h" 57#include "bnx2x.h"
60#include "bnx2x_init.h" 58#include "bnx2x_init.h"
61 59
62#define DRV_MODULE_VERSION "1.45.23" 60#define DRV_MODULE_VERSION "1.45.26"
63#define DRV_MODULE_RELDATE "2008/11/03" 61#define DRV_MODULE_RELDATE "2009/01/26"
64#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
65 63
66/* Time in jiffies before concluding the transmitter is hung */ 64/* Time in jiffies before concluding the transmitter is hung */
@@ -71,7 +69,7 @@ static char version[] __devinitdata =
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72 70
73MODULE_AUTHOR("Eliezer Tamir"); 71MODULE_AUTHOR("Eliezer Tamir");
74MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
75MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
76MODULE_VERSION(DRV_MODULE_VERSION); 74MODULE_VERSION(DRV_MODULE_VERSION);
77 75
@@ -95,6 +93,7 @@ MODULE_PARM_DESC(debug, "default debug msglevel");
95module_param(use_multi, int, 0); 93module_param(use_multi, int, 0);
96MODULE_PARM_DESC(use_multi, "use per-CPU queues"); 94MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97#endif 95#endif
96static struct workqueue_struct *bnx2x_wq;
98 97
99enum bnx2x_board_type { 98enum bnx2x_board_type {
100 BCM57710 = 0, 99 BCM57710 = 0,
@@ -671,7 +670,8 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
671 synchronize_irq(bp->pdev->irq); 670 synchronize_irq(bp->pdev->irq);
672 671
673 /* make sure sp_task is not running */ 672 /* make sure sp_task is not running */
674 cancel_work_sync(&bp->sp_task); 673 cancel_delayed_work(&bp->sp_task);
674 flush_workqueue(bnx2x_wq);
675} 675}
676 676
677/* fast path */ 677/* fast path */
@@ -733,6 +733,24 @@ static u16 bnx2x_ack_int(struct bnx2x *bp)
733 * fast path service functions 733 * fast path service functions
734 */ 734 */
735 735
736static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
737{
738 u16 tx_cons_sb;
739
740 /* Tell compiler that status block fields can change */
741 barrier();
742 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
743 return (fp->tx_pkt_cons != tx_cons_sb);
744}
745
746static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
747{
748 /* Tell compiler that consumer and producer can change */
749 barrier();
750 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
751
752}
753
736/* free skb in the packet ring at pos idx 754/* free skb in the packet ring at pos idx
737 * return idx of last bd freed 755 * return idx of last bd freed
738 */ 756 */
@@ -972,7 +990,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
972 return; 990 return;
973 991
974 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping), 992 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
975 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 993 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
976 __free_pages(page, PAGES_PER_SGE_SHIFT); 994 __free_pages(page, PAGES_PER_SGE_SHIFT);
977 995
978 sw_buf->page = NULL; 996 sw_buf->page = NULL;
@@ -1000,7 +1018,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1000 if (unlikely(page == NULL)) 1018 if (unlikely(page == NULL))
1001 return -ENOMEM; 1019 return -ENOMEM;
1002 1020
1003 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, 1021 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1004 PCI_DMA_FROMDEVICE); 1022 PCI_DMA_FROMDEVICE);
1005 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 1023 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1006 __free_pages(page, PAGES_PER_SGE_SHIFT); 1024 __free_pages(page, PAGES_PER_SGE_SHIFT);
@@ -1096,9 +1114,9 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1096 struct eth_fast_path_rx_cqe *fp_cqe) 1114 struct eth_fast_path_rx_cqe *fp_cqe)
1097{ 1115{
1098 struct bnx2x *bp = fp->bp; 1116 struct bnx2x *bp = fp->bp;
1099 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) - 1117 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1100 le16_to_cpu(fp_cqe->len_on_bd)) >> 1118 le16_to_cpu(fp_cqe->len_on_bd)) >>
1101 BCM_PAGE_SHIFT; 1119 SGE_PAGE_SHIFT;
1102 u16 last_max, last_elem, first_elem; 1120 u16 last_max, last_elem, first_elem;
1103 u16 delta = 0; 1121 u16 delta = 0;
1104 u16 i; 1122 u16 i;
@@ -1203,22 +1221,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1203 u16 cqe_idx) 1221 u16 cqe_idx)
1204{ 1222{
1205 struct sw_rx_page *rx_pg, old_rx_pg; 1223 struct sw_rx_page *rx_pg, old_rx_pg;
1206 struct page *sge;
1207 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); 1224 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1208 u32 i, frag_len, frag_size, pages; 1225 u32 i, frag_len, frag_size, pages;
1209 int err; 1226 int err;
1210 int j; 1227 int j;
1211 1228
1212 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd; 1229 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1213 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT; 1230 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1214 1231
1215 /* This is needed in order to enable forwarding support */ 1232 /* This is needed in order to enable forwarding support */
1216 if (frag_size) 1233 if (frag_size)
1217 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE, 1234 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1218 max(frag_size, (u32)len_on_bd)); 1235 max(frag_size, (u32)len_on_bd));
1219 1236
1220#ifdef BNX2X_STOP_ON_ERROR 1237#ifdef BNX2X_STOP_ON_ERROR
1221 if (pages > 8*PAGES_PER_SGE) { 1238 if (pages >
1239 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1222 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", 1240 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1223 pages, cqe_idx); 1241 pages, cqe_idx);
1224 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", 1242 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1234,9 +1252,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1234 1252
1235 /* FW gives the indices of the SGE as if the ring is an array 1253 /* FW gives the indices of the SGE as if the ring is an array
1236 (meaning that "next" element will consume 2 indices) */ 1254 (meaning that "next" element will consume 2 indices) */
1237 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE)); 1255 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1238 rx_pg = &fp->rx_page_ring[sge_idx]; 1256 rx_pg = &fp->rx_page_ring[sge_idx];
1239 sge = rx_pg->page;
1240 old_rx_pg = *rx_pg; 1257 old_rx_pg = *rx_pg;
1241 1258
1242 /* If we fail to allocate a substitute page, we simply stop 1259 /* If we fail to allocate a substitute page, we simply stop
@@ -1249,7 +1266,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1249 1266
 1250 /* Unmap the page as we are going to pass it to the stack */ 1267 /* Unmap the page as we are going to pass it to the stack */
1251 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping), 1268 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1252 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 1269 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1253 1270
1254 /* Add one frag and update the appropriate fields in the skb */ 1271 /* Add one frag and update the appropriate fields in the skb */
1255 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); 1272 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1282,6 +1299,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1282 if (likely(new_skb)) { 1299 if (likely(new_skb)) {
1283 /* fix ip xsum and give it to the stack */ 1300 /* fix ip xsum and give it to the stack */
1284 /* (no need to map the new skb) */ 1301 /* (no need to map the new skb) */
1302#ifdef BCM_VLAN
1303 int is_vlan_cqe =
1304 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1305 PARSING_FLAGS_VLAN);
1306 int is_not_hwaccel_vlan_cqe =
1307 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1308#endif
1285 1309
1286 prefetch(skb); 1310 prefetch(skb);
1287 prefetch(((char *)(skb)) + 128); 1311 prefetch(((char *)(skb)) + 128);
@@ -1306,6 +1330,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1306 struct iphdr *iph; 1330 struct iphdr *iph;
1307 1331
1308 iph = (struct iphdr *)skb->data; 1332 iph = (struct iphdr *)skb->data;
1333#ifdef BCM_VLAN
1334 /* If there is no Rx VLAN offloading -
1335 take VLAN tag into an account */
1336 if (unlikely(is_not_hwaccel_vlan_cqe))
1337 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1338#endif
1309 iph->check = 0; 1339 iph->check = 0;
1310 iph->check = ip_fast_csum((u8 *)iph, iph->ihl); 1340 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1311 } 1341 }
@@ -1313,9 +1343,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1313 if (!bnx2x_fill_frag_skb(bp, fp, skb, 1343 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1314 &cqe->fast_path_cqe, cqe_idx)) { 1344 &cqe->fast_path_cqe, cqe_idx)) {
1315#ifdef BCM_VLAN 1345#ifdef BCM_VLAN
1316 if ((bp->vlgrp != NULL) && 1346 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1317 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & 1347 (!is_not_hwaccel_vlan_cqe))
1318 PARSING_FLAGS_VLAN))
1319 vlan_hwaccel_receive_skb(skb, bp->vlgrp, 1348 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1320 le16_to_cpu(cqe->fast_path_cqe. 1349 le16_to_cpu(cqe->fast_path_cqe.
1321 vlan_tag)); 1350 vlan_tag));
@@ -1355,11 +1384,23 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1355 rx_prods.cqe_prod = rx_comp_prod; 1384 rx_prods.cqe_prod = rx_comp_prod;
1356 rx_prods.sge_prod = rx_sge_prod; 1385 rx_prods.sge_prod = rx_sge_prod;
1357 1386
1387 /*
1388 * Make sure that the BD and SGE data is updated before updating the
1389 * producers since FW might read the BD/SGE right after the producer
1390 * is updated.
1391 * This is only applicable for weak-ordered memory model archs such
 1392 * as IA-64. The following barrier is also mandatory since FW
1393 * assumes BDs must have buffers.
1394 */
1395 wmb();
1396
1358 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++) 1397 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1359 REG_WR(bp, BAR_TSTRORM_INTMEM + 1398 REG_WR(bp, BAR_TSTRORM_INTMEM +
1360 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4, 1399 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1361 ((u32 *)&rx_prods)[i]); 1400 ((u32 *)&rx_prods)[i]);
1362 1401
1402 mmiowb(); /* keep prod updates ordered */
1403
1363 DP(NETIF_MSG_RX_STATUS, 1404 DP(NETIF_MSG_RX_STATUS,
1364 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n", 1405 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1365 bd_prod, rx_comp_prod, rx_sge_prod); 1406 bd_prod, rx_comp_prod, rx_sge_prod);
@@ -1415,7 +1456,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1415 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" 1456 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1416 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), 1457 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1417 cqe_fp_flags, cqe->fast_path_cqe.status_flags, 1458 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1418 cqe->fast_path_cqe.rss_hash_result, 1459 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1419 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), 1460 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1420 le16_to_cpu(cqe->fast_path_cqe.pkt_len)); 1461 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1421 1462
@@ -1547,7 +1588,7 @@ reuse_rx:
1547 } 1588 }
1548 1589
1549#ifdef BCM_VLAN 1590#ifdef BCM_VLAN
1550 if ((bp->vlgrp != NULL) && 1591 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1551 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & 1592 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552 PARSING_FLAGS_VLAN)) 1593 PARSING_FLAGS_VLAN))
1553 vlan_hwaccel_receive_skb(skb, bp->vlgrp, 1594 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
@@ -1580,7 +1621,6 @@ next_cqe:
1580 /* Update producers */ 1621 /* Update producers */
1581 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod, 1622 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1582 fp->rx_sge_prod); 1623 fp->rx_sge_prod);
1583 mmiowb(); /* keep prod updates ordered */
1584 1624
1585 fp->rx_pkt += rx_pkt; 1625 fp->rx_pkt += rx_pkt;
1586 fp->rx_calls++; 1626 fp->rx_calls++;
@@ -1660,7 +1700,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1660 1700
1661 1701
1662 if (unlikely(status & 0x1)) { 1702 if (unlikely(status & 0x1)) {
1663 schedule_work(&bp->sp_task); 1703 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1664 1704
1665 status &= ~0x1; 1705 status &= ~0x1;
1666 if (!status) 1706 if (!status)
@@ -1887,7 +1927,8 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1887 1927
1888static void bnx2x_calc_fc_adv(struct bnx2x *bp) 1928static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1889{ 1929{
1890 switch (bp->link_vars.ieee_fc) { 1930 switch (bp->link_vars.ieee_fc &
1931 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
1891 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 1932 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1892 bp->port.advertising &= ~(ADVERTISED_Asym_Pause | 1933 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1893 ADVERTISED_Pause); 1934 ADVERTISED_Pause);
@@ -1957,10 +1998,11 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1957 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1998 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1958 bnx2x_release_phy_lock(bp); 1999 bnx2x_release_phy_lock(bp);
1959 2000
2001 bnx2x_calc_fc_adv(bp);
2002
1960 if (bp->link_vars.link_up) 2003 if (bp->link_vars.link_up)
1961 bnx2x_link_report(bp); 2004 bnx2x_link_report(bp);
1962 2005
1963 bnx2x_calc_fc_adv(bp);
1964 2006
1965 return rc; 2007 return rc;
1966 } 2008 }
@@ -2220,9 +2262,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2220 /* Make sure that we are synced with the current statistics */ 2262 /* Make sure that we are synced with the current statistics */
2221 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2263 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2222 2264
2223 bnx2x_acquire_phy_lock(bp);
2224 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2265 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2225 bnx2x_release_phy_lock(bp);
2226 2266
2227 if (bp->link_vars.link_up) { 2267 if (bp->link_vars.link_up) {
2228 2268
@@ -2471,6 +2511,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2471 if (asserted & ATTN_HARD_WIRED_MASK) { 2511 if (asserted & ATTN_HARD_WIRED_MASK) {
2472 if (asserted & ATTN_NIG_FOR_FUNC) { 2512 if (asserted & ATTN_NIG_FOR_FUNC) {
2473 2513
2514 bnx2x_acquire_phy_lock(bp);
2515
2474 /* save nig interrupt mask */ 2516 /* save nig interrupt mask */
2475 bp->nig_mask = REG_RD(bp, nig_int_mask_addr); 2517 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2476 REG_WR(bp, nig_int_mask_addr, 0); 2518 REG_WR(bp, nig_int_mask_addr, 0);
@@ -2526,8 +2568,10 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2526 REG_WR(bp, hc_addr, asserted); 2568 REG_WR(bp, hc_addr, asserted);
2527 2569
2528 /* now set back the mask */ 2570 /* now set back the mask */
2529 if (asserted & ATTN_NIG_FOR_FUNC) 2571 if (asserted & ATTN_NIG_FOR_FUNC) {
2530 REG_WR(bp, nig_int_mask_addr, bp->nig_mask); 2572 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2573 bnx2x_release_phy_lock(bp);
2574 }
2531} 2575}
2532 2576
2533static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 2577static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -2795,8 +2839,10 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2795static void bnx2x_attn_int(struct bnx2x *bp) 2839static void bnx2x_attn_int(struct bnx2x *bp)
2796{ 2840{
2797 /* read local copy of bits */ 2841 /* read local copy of bits */
2798 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits; 2842 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2799 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack; 2843 attn_bits);
2844 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2845 attn_bits_ack);
2800 u32 attn_state = bp->attn_state; 2846 u32 attn_state = bp->attn_state;
2801 2847
2802 /* look for changed bits */ 2848 /* look for changed bits */
@@ -2820,7 +2866,7 @@ static void bnx2x_attn_int(struct bnx2x *bp)
2820 2866
2821static void bnx2x_sp_task(struct work_struct *work) 2867static void bnx2x_sp_task(struct work_struct *work)
2822{ 2868{
2823 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task); 2869 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2824 u16 status; 2870 u16 status;
2825 2871
2826 2872
@@ -2844,7 +2890,7 @@ static void bnx2x_sp_task(struct work_struct *work)
2844 if (status & 0x2) 2890 if (status & 0x2)
2845 bp->stats_pending = 0; 2891 bp->stats_pending = 0;
2846 2892
2847 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx, 2893 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2848 IGU_INT_NOP, 1); 2894 IGU_INT_NOP, 1);
2849 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx), 2895 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2850 IGU_INT_NOP, 1); 2896 IGU_INT_NOP, 1);
@@ -2875,7 +2921,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2875 return IRQ_HANDLED; 2921 return IRQ_HANDLED;
2876#endif 2922#endif
2877 2923
2878 schedule_work(&bp->sp_task); 2924 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2879 2925
2880 return IRQ_HANDLED; 2926 return IRQ_HANDLED;
2881} 2927}
@@ -2892,7 +2938,7 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2892#define ADD_64(s_hi, a_hi, s_lo, a_lo) \ 2938#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2893 do { \ 2939 do { \
2894 s_lo += a_lo; \ 2940 s_lo += a_lo; \
2895 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \ 2941 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2896 } while (0) 2942 } while (0)
2897 2943
2898/* difference = minuend - subtrahend */ 2944/* difference = minuend - subtrahend */
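The ADD_64 change is a pure operator-precedence fix: '?:' binds more loosely than '+', so the old "a_hi + (s_lo < a_lo) ? 1 : 0" was evaluated as "(a_hi + (s_lo < a_lo)) ? 1 : 0", throwing away the high word of the addend. A small standalone illustration (the variable values are chosen arbitrarily to force a carry):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t s_hi = 1, s_lo = 0xffffffffu;
	uint32_t a_hi = 2, a_lo = 1;

	s_lo += a_lo;			/* wraps to 0, so a carry is needed */

	/* Old form: '?:' binds looser than '+', so this parses as
	 * (a_hi + (s_lo < a_lo)) ? 1 : 0 and collapses to just 1. */
	uint32_t buggy = s_hi + (a_hi + (s_lo < a_lo) ? 1 : 0);

	/* Fixed form: the carry is computed first, then added to a_hi. */
	uint32_t fixed = s_hi + a_hi + ((s_lo < a_lo) ? 1 : 0);

	printf("buggy=%u fixed=%u\n", buggy, fixed);	/* buggy=2 fixed=4 */
	return 0;
}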
@@ -4496,7 +4542,7 @@ static void bnx2x_init_context(struct bnx2x *bp)
4496 4542
4497static void bnx2x_init_ind_table(struct bnx2x *bp) 4543static void bnx2x_init_ind_table(struct bnx2x *bp)
4498{ 4544{
4499 int port = BP_PORT(bp); 4545 int func = BP_FUNC(bp);
4500 int i; 4546 int i;
4501 4547
4502 if (!is_multi(bp)) 4548 if (!is_multi(bp))
@@ -4505,10 +4551,8 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4505 DP(NETIF_MSG_IFUP, "Initializing indirection table\n"); 4551 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4506 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4552 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4507 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4553 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4508 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i, 4554 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4509 i % bp->num_queues); 4555 BP_CL_ID(bp) + (i % bp->num_queues));
4510
4511 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4512} 4556}
4513 4557
4514static void bnx2x_set_client_config(struct bnx2x *bp) 4558static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -4517,12 +4561,12 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4517 int port = BP_PORT(bp); 4561 int port = BP_PORT(bp);
4518 int i; 4562 int i;
4519 4563
4520 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD; 4564 tstorm_client.mtu = bp->dev->mtu;
4521 tstorm_client.statistics_counter_id = BP_CL_ID(bp); 4565 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4522 tstorm_client.config_flags = 4566 tstorm_client.config_flags =
4523 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; 4567 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4524#ifdef BCM_VLAN 4568#ifdef BCM_VLAN
4525 if (bp->rx_mode && bp->vlgrp) { 4569 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4526 tstorm_client.config_flags |= 4570 tstorm_client.config_flags |=
4527 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; 4571 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4528 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 4572 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -4531,7 +4575,7 @@ static void bnx2x_set_client_config(struct bnx2x *bp)
4531 4575
4532 if (bp->flags & TPA_ENABLE_FLAG) { 4576 if (bp->flags & TPA_ENABLE_FLAG) {
4533 tstorm_client.max_sges_for_packet = 4577 tstorm_client.max_sges_for_packet =
4534 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT; 4578 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4535 tstorm_client.max_sges_for_packet = 4579 tstorm_client.max_sges_for_packet =
4536 ((tstorm_client.max_sges_for_packet + 4580 ((tstorm_client.max_sges_for_packet +
4537 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> 4581 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
@@ -4714,10 +4758,11 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
4714 bp->e1hov); 4758 bp->e1hov);
4715 } 4759 }
4716 4760
4717 /* Init CQ ring mapping and aggregation size */ 4761 /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
4718 max_agg_size = min((u32)(bp->rx_buf_size + 4762 max_agg_size =
4719 8*BCM_PAGE_SIZE*PAGES_PER_SGE), 4763 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4720 (u32)0xffff); 4764 SGE_PAGE_SIZE * PAGES_PER_SGE),
4765 (u32)0xffff);
4721 for_each_queue(bp, i) { 4766 for_each_queue(bp, i) {
4722 struct bnx2x_fastpath *fp = &bp->fp[i]; 4767 struct bnx2x_fastpath *fp = &bp->fp[i];
4723 4768
@@ -4785,6 +4830,15 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4785 bnx2x_init_context(bp); 4830 bnx2x_init_context(bp);
4786 bnx2x_init_internal(bp, load_code); 4831 bnx2x_init_internal(bp, load_code);
4787 bnx2x_init_ind_table(bp); 4832 bnx2x_init_ind_table(bp);
4833 bnx2x_stats_init(bp);
4834
4835 /* At this point, we are ready for interrupts */
4836 atomic_set(&bp->intr_sem, 0);
4837
4838 /* flush all before enabling interrupts */
4839 mb();
4840 mmiowb();
4841
4788 bnx2x_int_enable(bp); 4842 bnx2x_int_enable(bp);
4789} 4843}
4790 4844
@@ -5101,12 +5155,21 @@ static void enable_blocks_attention(struct bnx2x *bp)
5101} 5155}
5102 5156
5103 5157
5158static void bnx2x_reset_common(struct bnx2x *bp)
5159{
5160 /* reset_common */
5161 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5162 0xd3ffff7f);
5163 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5164}
5165
5104static int bnx2x_init_common(struct bnx2x *bp) 5166static int bnx2x_init_common(struct bnx2x *bp)
5105{ 5167{
5106 u32 val, i; 5168 u32 val, i;
5107 5169
5108 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 5170 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5109 5171
5172 bnx2x_reset_common(bp);
5110 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); 5173 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5111 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc); 5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5112 5175
@@ -5134,7 +5197,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
5134 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); 5197 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5135 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); 5198 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); 5199 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5138 5200
5139/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ 5201/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5140 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); 5202 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
@@ -5212,6 +5274,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
5212 } 5274 }
5213 5275
5214 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END); 5276 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5277 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5215 /* set NIC mode */ 5278 /* set NIC mode */
5216 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5279 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5217 if (CHIP_IS_E1H(bp)) 5280 if (CHIP_IS_E1H(bp))
@@ -6087,8 +6150,8 @@ static void bnx2x_netif_start(struct bnx2x *bp)
6087static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) 6150static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6088{ 6151{
6089 bnx2x_int_disable_sync(bp, disable_hw); 6152 bnx2x_int_disable_sync(bp, disable_hw);
6153 bnx2x_napi_disable(bp);
6090 if (netif_running(bp->dev)) { 6154 if (netif_running(bp->dev)) {
6091 bnx2x_napi_disable(bp);
6092 netif_tx_disable(bp->dev); 6155 netif_tx_disable(bp->dev);
6093 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 6156 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6094 } 6157 }
@@ -6108,7 +6171,7 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6108 * multicast 64-127:port0 128-191:port1 6171 * multicast 64-127:port0 128-191:port1
6109 */ 6172 */
6110 config->hdr.length_6b = 2; 6173 config->hdr.length_6b = 2;
6111 config->hdr.offset = port ? 31 : 0; 6174 config->hdr.offset = port ? 32 : 0;
6112 config->hdr.client_id = BP_CL_ID(bp); 6175 config->hdr.client_id = BP_CL_ID(bp);
6113 config->hdr.reserved1 = 0; 6176 config->hdr.reserved1 = 0;
6114 6177
@@ -6272,7 +6335,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev);
6272static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 6335static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6273{ 6336{
6274 u32 load_code; 6337 u32 load_code;
6275 int i, rc; 6338 int i, rc = 0;
6276#ifdef BNX2X_STOP_ON_ERROR 6339#ifdef BNX2X_STOP_ON_ERROR
6277 if (unlikely(bp->panic)) 6340 if (unlikely(bp->panic))
6278 return -EPERM; 6341 return -EPERM;
@@ -6280,48 +6343,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6280 6343
6281 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 6344 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6282 6345
6283 /* Send LOAD_REQUEST command to MCP
6284 Returns the type of LOAD command:
6285 if it is the first port to be initialized
6286 common blocks should be initialized, otherwise - not
6287 */
6288 if (!BP_NOMCP(bp)) {
6289 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6290 if (!load_code) {
6291 BNX2X_ERR("MCP response failure, aborting\n");
6292 return -EBUSY;
6293 }
6294 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6295 return -EBUSY; /* other port in diagnostic mode */
6296
6297 } else {
6298 int port = BP_PORT(bp);
6299
6300 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6301 load_count[0], load_count[1], load_count[2]);
6302 load_count[0]++;
6303 load_count[1 + port]++;
6304 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6305 load_count[0], load_count[1], load_count[2]);
6306 if (load_count[0] == 1)
6307 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6308 else if (load_count[1 + port] == 1)
6309 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6310 else
6311 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6312 }
6313
6314 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6315 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6316 bp->port.pmf = 1;
6317 else
6318 bp->port.pmf = 0;
6319 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6320
6321 /* if we can't use MSI-X we only need one fp,
6322 * so try to enable MSI-X with the requested number of fp's
6323 * and fallback to inta with one fp
6324 */
6325 if (use_inta) { 6346 if (use_inta) {
6326 bp->num_queues = 1; 6347 bp->num_queues = 1;
6327 6348
@@ -6336,7 +6357,15 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6336 else 6357 else
6337 bp->num_queues = 1; 6358 bp->num_queues = 1;
6338 6359
6339 if (bnx2x_enable_msix(bp)) { 6360 DP(NETIF_MSG_IFUP,
6361 "set number of queues to %d\n", bp->num_queues);
6362
6363 /* if we can't use MSI-X we only need one fp,
6364 * so try to enable MSI-X with the requested number of fp's
6365 * and fallback to MSI or legacy INTx with one fp
6366 */
6367 rc = bnx2x_enable_msix(bp);
6368 if (rc) {
6340 /* failed to enable MSI-X */ 6369 /* failed to enable MSI-X */
6341 bp->num_queues = 1; 6370 bp->num_queues = 1;
6342 if (use_multi) 6371 if (use_multi)
@@ -6344,8 +6373,6 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6344 " to enable MSI-X\n"); 6373 " to enable MSI-X\n");
6345 } 6374 }
6346 } 6375 }
6347 DP(NETIF_MSG_IFUP,
6348 "set number of queues to %d\n", bp->num_queues);
6349 6376
6350 if (bnx2x_alloc_mem(bp)) 6377 if (bnx2x_alloc_mem(bp))
6351 return -ENOMEM; 6378 return -ENOMEM;
@@ -6354,30 +6381,85 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6354 bnx2x_fp(bp, i, disable_tpa) = 6381 bnx2x_fp(bp, i, disable_tpa) =
6355 ((bp->flags & TPA_ENABLE_FLAG) == 0); 6382 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6356 6383
6384 for_each_queue(bp, i)
6385 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6386 bnx2x_poll, 128);
6387
6388#ifdef BNX2X_STOP_ON_ERROR
6389 for_each_queue(bp, i) {
6390 struct bnx2x_fastpath *fp = &bp->fp[i];
6391
6392 fp->poll_no_work = 0;
6393 fp->poll_calls = 0;
6394 fp->poll_max_calls = 0;
6395 fp->poll_complete = 0;
6396 fp->poll_exit = 0;
6397 }
6398#endif
6399 bnx2x_napi_enable(bp);
6400
6357 if (bp->flags & USING_MSIX_FLAG) { 6401 if (bp->flags & USING_MSIX_FLAG) {
6358 rc = bnx2x_req_msix_irqs(bp); 6402 rc = bnx2x_req_msix_irqs(bp);
6359 if (rc) { 6403 if (rc) {
6360 pci_disable_msix(bp->pdev); 6404 pci_disable_msix(bp->pdev);
6361 goto load_error; 6405 goto load_error1;
6362 } 6406 }
6407 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6363 } else { 6408 } else {
6364 bnx2x_ack_int(bp); 6409 bnx2x_ack_int(bp);
6365 rc = bnx2x_req_irq(bp); 6410 rc = bnx2x_req_irq(bp);
6366 if (rc) { 6411 if (rc) {
6367 BNX2X_ERR("IRQ request failed, aborting\n"); 6412 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6368 goto load_error; 6413 goto load_error1;
6369 } 6414 }
6370 } 6415 }
6371 6416
6372 for_each_queue(bp, i) 6417 /* Send LOAD_REQUEST command to MCP
6373 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 6418 Returns the type of LOAD command:
6374 bnx2x_poll, 128); 6419 if it is the first port to be initialized
6420 common blocks should be initialized, otherwise - not
6421 */
6422 if (!BP_NOMCP(bp)) {
6423 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6424 if (!load_code) {
6425 BNX2X_ERR("MCP response failure, aborting\n");
6426 rc = -EBUSY;
6427 goto load_error2;
6428 }
6429 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6430 rc = -EBUSY; /* other port in diagnostic mode */
6431 goto load_error2;
6432 }
6433
6434 } else {
6435 int port = BP_PORT(bp);
6436
6437 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6438 load_count[0], load_count[1], load_count[2]);
6439 load_count[0]++;
6440 load_count[1 + port]++;
6441 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6442 load_count[0], load_count[1], load_count[2]);
6443 if (load_count[0] == 1)
6444 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6445 else if (load_count[1 + port] == 1)
6446 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6447 else
6448 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6449 }
6450
6451 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6452 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6453 bp->port.pmf = 1;
6454 else
6455 bp->port.pmf = 0;
6456 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6375 6457
6376 /* Initialize HW */ 6458 /* Initialize HW */
6377 rc = bnx2x_init_hw(bp, load_code); 6459 rc = bnx2x_init_hw(bp, load_code);
6378 if (rc) { 6460 if (rc) {
6379 BNX2X_ERR("HW init failed, aborting\n"); 6461 BNX2X_ERR("HW init failed, aborting\n");
6380 goto load_int_disable; 6462 goto load_error2;
6381 } 6463 }
6382 6464
6383 /* Setup NIC internals and enable interrupts */ 6465 /* Setup NIC internals and enable interrupts */
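The reworked load path now adds and enables the NAPI contexts before any IRQ or MCP interaction, which is why the new load_error1/load_error2/load_error3 labels unwind in the opposite order (disable NAPI, delete the contexts, free memory). A reduced sketch of that pairing; my_adapter, my_queue, my_poll and my_isr are illustrative names, not driver symbols:

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_queue { struct napi_struct napi; };

struct my_adapter {
	struct net_device *dev;
	struct my_queue q[4];
	int num_queues;
	int irq;
};

static int my_poll(struct napi_struct *napi, int budget) { return 0; }
static irqreturn_t my_isr(int irq, void *dev_id) { return IRQ_HANDLED; }

static int my_open(struct my_adapter *ap)
{
	int i, rc;

	for (i = 0; i < ap->num_queues; i++) {
		netif_napi_add(ap->dev, &ap->q[i].napi, my_poll, 64);
		napi_enable(&ap->q[i].napi);
	}

	rc = request_irq(ap->irq, my_isr, 0, "my_adapter", ap);
	if (rc)
		goto err_napi;			/* unwind in reverse order */
	return 0;

err_napi:
	for (i = 0; i < ap->num_queues; i++) {
		napi_disable(&ap->q[i].napi);
		netif_napi_del(&ap->q[i].napi);
	}
	return rc;
}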
@@ -6389,25 +6471,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6389 if (!load_code) { 6471 if (!load_code) {
6390 BNX2X_ERR("MCP response failure, aborting\n"); 6472 BNX2X_ERR("MCP response failure, aborting\n");
6391 rc = -EBUSY; 6473 rc = -EBUSY;
6392 goto load_rings_free; 6474 goto load_error3;
6393 } 6475 }
6394 } 6476 }
6395 6477
6396 bnx2x_stats_init(bp);
6397
6398 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 6478 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6399 6479
6400 /* Enable Rx interrupt handling before sending the ramrod
6401 as it's completed on Rx FP queue */
6402 bnx2x_napi_enable(bp);
6403
6404 /* Enable interrupt handling */
6405 atomic_set(&bp->intr_sem, 0);
6406
6407 rc = bnx2x_setup_leading(bp); 6480 rc = bnx2x_setup_leading(bp);
6408 if (rc) { 6481 if (rc) {
6409 BNX2X_ERR("Setup leading failed!\n"); 6482 BNX2X_ERR("Setup leading failed!\n");
6410 goto load_netif_stop; 6483 goto load_error3;
6411 } 6484 }
6412 6485
6413 if (CHIP_IS_E1H(bp)) 6486 if (CHIP_IS_E1H(bp))
@@ -6420,7 +6493,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6420 for_each_nondefault_queue(bp, i) { 6493 for_each_nondefault_queue(bp, i) {
6421 rc = bnx2x_setup_multi(bp, i); 6494 rc = bnx2x_setup_multi(bp, i);
6422 if (rc) 6495 if (rc)
6423 goto load_netif_stop; 6496 goto load_error3;
6424 } 6497 }
6425 6498
6426 if (CHIP_IS_E1(bp)) 6499 if (CHIP_IS_E1(bp))
@@ -6436,18 +6509,18 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6436 case LOAD_NORMAL: 6509 case LOAD_NORMAL:
6437 /* Tx queue should be only reenabled */ 6510 /* Tx queue should be only reenabled */
6438 netif_wake_queue(bp->dev); 6511 netif_wake_queue(bp->dev);
6512 /* Initialize the receive filter. */
6439 bnx2x_set_rx_mode(bp->dev); 6513 bnx2x_set_rx_mode(bp->dev);
6440 break; 6514 break;
6441 6515
6442 case LOAD_OPEN: 6516 case LOAD_OPEN:
6443 netif_start_queue(bp->dev); 6517 netif_start_queue(bp->dev);
6518 /* Initialize the receive filter. */
6444 bnx2x_set_rx_mode(bp->dev); 6519 bnx2x_set_rx_mode(bp->dev);
6445 if (bp->flags & USING_MSIX_FLAG)
6446 printk(KERN_INFO PFX "%s: using MSI-X\n",
6447 bp->dev->name);
6448 break; 6520 break;
6449 6521
6450 case LOAD_DIAG: 6522 case LOAD_DIAG:
6523 /* Initialize the receive filter. */
6451 bnx2x_set_rx_mode(bp->dev); 6524 bnx2x_set_rx_mode(bp->dev);
6452 bp->state = BNX2X_STATE_DIAG; 6525 bp->state = BNX2X_STATE_DIAG;
6453 break; 6526 break;
@@ -6465,20 +6538,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6465 6538
6466 return 0; 6539 return 0;
6467 6540
6468load_netif_stop: 6541load_error3:
6469 bnx2x_napi_disable(bp); 6542 bnx2x_int_disable_sync(bp, 1);
6470load_rings_free: 6543 if (!BP_NOMCP(bp)) {
6544 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6545 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6546 }
6547 bp->port.pmf = 0;
6471 /* Free SKBs, SGEs, TPA pool and driver internals */ 6548 /* Free SKBs, SGEs, TPA pool and driver internals */
6472 bnx2x_free_skbs(bp); 6549 bnx2x_free_skbs(bp);
6473 for_each_queue(bp, i) 6550 for_each_queue(bp, i)
6474 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6551 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6475load_int_disable: 6552load_error2:
6476 bnx2x_int_disable_sync(bp, 1);
6477 /* Release IRQs */ 6553 /* Release IRQs */
6478 bnx2x_free_irq(bp); 6554 bnx2x_free_irq(bp);
6479load_error: 6555load_error1:
6556 bnx2x_napi_disable(bp);
6557 for_each_queue(bp, i)
6558 netif_napi_del(&bnx2x_fp(bp, i, napi));
6480 bnx2x_free_mem(bp); 6559 bnx2x_free_mem(bp);
6481 bp->port.pmf = 0;
6482 6560
6483 /* TBD we really need to reset the chip 6561 /* TBD we really need to reset the chip
6484 if we want to recover from this */ 6562 if we want to recover from this */
@@ -6551,6 +6629,7 @@ static int bnx2x_stop_leading(struct bnx2x *bp)
6551 } 6629 }
6552 cnt--; 6630 cnt--;
6553 msleep(1); 6631 msleep(1);
6632 rmb(); /* Refresh the dsb_sp_prod */
6554 } 6633 }
6555 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; 6634 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6556 bp->fp[0].state = BNX2X_FP_STATE_CLOSED; 6635 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
@@ -6602,14 +6681,6 @@ static void bnx2x_reset_port(struct bnx2x *bp)
6602 /* TODO: Close Doorbell port? */ 6681 /* TODO: Close Doorbell port? */
6603} 6682}
6604 6683
6605static void bnx2x_reset_common(struct bnx2x *bp)
6606{
6607 /* reset_common */
6608 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6609 0xd3ffff7f);
6610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6611}
6612
6613static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) 6684static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6614{ 6685{
6615 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n", 6686 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
@@ -6650,20 +6721,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6650 bnx2x_set_storm_rx_mode(bp); 6721 bnx2x_set_storm_rx_mode(bp);
6651 6722
6652 bnx2x_netif_stop(bp, 1); 6723 bnx2x_netif_stop(bp, 1);
6653 if (!netif_running(bp->dev)) 6724
6654 bnx2x_napi_disable(bp);
6655 del_timer_sync(&bp->timer); 6725 del_timer_sync(&bp->timer);
6656 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb, 6726 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6657 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq)); 6727 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6658 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 6728 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6659 6729
6730 /* Release IRQs */
6731 bnx2x_free_irq(bp);
6732
6660 /* Wait until tx fast path tasks complete */ 6733 /* Wait until tx fast path tasks complete */
6661 for_each_queue(bp, i) { 6734 for_each_queue(bp, i) {
6662 struct bnx2x_fastpath *fp = &bp->fp[i]; 6735 struct bnx2x_fastpath *fp = &bp->fp[i];
6663 6736
6664 cnt = 1000; 6737 cnt = 1000;
6665 smp_rmb(); 6738 smp_rmb();
6666 while (BNX2X_HAS_TX_WORK(fp)) { 6739 while (bnx2x_has_tx_work_unload(fp)) {
6667 6740
6668 bnx2x_tx_int(fp, 1000); 6741 bnx2x_tx_int(fp, 1000);
6669 if (!cnt) { 6742 if (!cnt) {
@@ -6684,9 +6757,6 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6684 /* Give HW time to discard old tx messages */ 6757 /* Give HW time to discard old tx messages */
6685 msleep(1); 6758 msleep(1);
6686 6759
6687 /* Release IRQs */
6688 bnx2x_free_irq(bp);
6689
6690 if (CHIP_IS_E1(bp)) { 6760 if (CHIP_IS_E1(bp)) {
6691 struct mac_configuration_cmd *config = 6761 struct mac_configuration_cmd *config =
6692 bnx2x_sp(bp, mcast_config); 6762 bnx2x_sp(bp, mcast_config);
@@ -6795,6 +6865,8 @@ unload_error:
6795 bnx2x_free_skbs(bp); 6865 bnx2x_free_skbs(bp);
6796 for_each_queue(bp, i) 6866 for_each_queue(bp, i)
6797 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6867 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6868 for_each_queue(bp, i)
6869 netif_napi_del(&bnx2x_fp(bp, i, napi));
6798 bnx2x_free_mem(bp); 6870 bnx2x_free_mem(bp);
6799 6871
6800 bp->state = BNX2X_STATE_CLOSED; 6872 bp->state = BNX2X_STATE_CLOSED;
@@ -6847,10 +6919,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6847 */ 6919 */
6848 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); 6920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6849 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST); 6921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6850 if (val == 0x7)
6851 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6852 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6853
6854 if (val == 0x7) { 6922 if (val == 0x7) {
6855 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 6923 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6856 /* save our func */ 6924 /* save our func */
@@ -6858,6 +6926,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6858 u32 swap_en; 6926 u32 swap_en;
6859 u32 swap_val; 6927 u32 swap_val;
6860 6928
6929 /* clear the UNDI indication */
6930 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6931
6861 BNX2X_DEV_INFO("UNDI is active! reset device\n"); 6932 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6862 6933
6863 /* try unload UNDI on port 0 */ 6934 /* try unload UNDI on port 0 */
@@ -6883,6 +6954,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6883 bnx2x_fw_command(bp, reset_code); 6954 bnx2x_fw_command(bp, reset_code);
6884 } 6955 }
6885 6956
6957 /* now it's safe to release the lock */
6958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6959
6886 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 : 6960 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6887 HC_REG_CONFIG_0), 0x1000); 6961 HC_REG_CONFIG_0), 0x1000);
6888 6962
@@ -6927,7 +7001,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6927 bp->fw_seq = 7001 bp->fw_seq =
6928 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) & 7002 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6929 DRV_MSG_SEQ_NUMBER_MASK); 7003 DRV_MSG_SEQ_NUMBER_MASK);
6930 } 7004
7005 } else
7006 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6931 } 7007 }
6932} 7008}
6933 7009
@@ -6944,7 +7020,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6944 id |= ((val & 0xf) << 12); 7020 id |= ((val & 0xf) << 12);
6945 val = REG_RD(bp, MISC_REG_CHIP_METAL); 7021 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6946 id |= ((val & 0xff) << 4); 7022 id |= ((val & 0xff) << 4);
6947 REG_RD(bp, MISC_REG_BOND_ID); 7023 val = REG_RD(bp, MISC_REG_BOND_ID);
6948 id |= (val & 0xf); 7024 id |= (val & 0xf);
6949 bp->common.chip_id = id; 7025 bp->common.chip_id = id;
6950 bp->link_params.chip_id = bp->common.chip_id; 7026 bp->link_params.chip_id = bp->common.chip_id;
@@ -7501,7 +7577,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7501 7577
7502 mutex_init(&bp->port.phy_mutex); 7578 mutex_init(&bp->port.phy_mutex);
7503 7579
7504 INIT_WORK(&bp->sp_task, bnx2x_sp_task); 7580 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7505 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 7581 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7506 7582
7507 rc = bnx2x_get_hwinfo(bp); 7583 rc = bnx2x_get_hwinfo(bp);
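Converting sp_task from a plain work_struct to a delayed_work is what makes the rest of the patch consistent: the handler recovers its bnx2x with container_of(work, ..., sp_task.work), and every schedule_work() call becomes queue_delayed_work(bnx2x_wq, &bp->sp_task, 0). The same shape on a hypothetical device structure (my_dev, my_sp_task and my_wq are illustrative names):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

struct my_dev {
	struct delayed_work sp_task;
};

static void my_sp_task(struct work_struct *work)
{
	/* delayed_work embeds a work_struct called .work, hence the
	 * container_of() on sp_task.work rather than sp_task itself */
	struct my_dev *md = container_of(work, struct my_dev, sp_task.work);

	/* handle slow-path events for md ... */
	(void)md;
}

static void my_dev_init(struct my_dev *md)
{
	INIT_DELAYED_WORK(&md->sp_task, my_sp_task);

	/* a delay of 0 jiffies means "run as soon as the workqueue is free" */
	queue_delayed_work(my_wq, &md->sp_task, 0);
}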
@@ -8076,6 +8152,9 @@ static int bnx2x_get_eeprom(struct net_device *dev,
8076 struct bnx2x *bp = netdev_priv(dev); 8152 struct bnx2x *bp = netdev_priv(dev);
8077 int rc; 8153 int rc;
8078 8154
8155 if (!netif_running(dev))
8156 return -EAGAIN;
8157
8079 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n" 8158 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8080 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n", 8159 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8081 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset, 8160 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
@@ -8678,18 +8757,17 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8678 8757
8679 if (loopback_mode == BNX2X_MAC_LOOPBACK) { 8758 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8680 bp->link_params.loopback_mode = LOOPBACK_BMAC; 8759 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8681 bnx2x_acquire_phy_lock(bp);
8682 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8760 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8683 bnx2x_release_phy_lock(bp);
8684 8761
8685 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) { 8762 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8763 u16 cnt = 1000;
8686 bp->link_params.loopback_mode = LOOPBACK_XGXS_10; 8764 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8687 bnx2x_acquire_phy_lock(bp);
8688 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 8765 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8689 bnx2x_release_phy_lock(bp);
8690 /* wait until link state is restored */ 8766 /* wait until link state is restored */
8691 bnx2x_wait_for_link(bp, link_up); 8767 if (link_up)
8692 8768 while (cnt-- && bnx2x_test_link(&bp->link_params,
8769 &bp->link_vars))
8770 msleep(10);
8693 } else 8771 } else
8694 return -EINVAL; 8772 return -EINVAL;
8695 8773
@@ -8727,6 +8805,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8727 tx_bd->general_data = ((UNICAST_ADDRESS << 8805 tx_bd->general_data = ((UNICAST_ADDRESS <<
8728 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1); 8806 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8729 8807
8808 wmb();
8809
8730 fp->hw_tx_prods->bds_prod = 8810 fp->hw_tx_prods->bds_prod =
8731 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1); 8811 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8732 mb(); /* FW restriction: must not reorder writing nbd and packets */ 8812 mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -8778,7 +8858,6 @@ test_loopback_rx_exit:
8778 /* Update producers */ 8858 /* Update producers */
8779 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod, 8859 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8780 fp->rx_sge_prod); 8860 fp->rx_sge_prod);
8781 mmiowb(); /* keep prod updates ordered */
8782 8861
8783test_loopback_exit: 8862test_loopback_exit:
8784 bp->link_params.loopback_mode = LOOPBACK_NONE; 8863 bp->link_params.loopback_mode = LOOPBACK_NONE;
@@ -8794,6 +8873,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8794 return BNX2X_LOOPBACK_FAILED; 8873 return BNX2X_LOOPBACK_FAILED;
8795 8874
8796 bnx2x_netif_stop(bp, 1); 8875 bnx2x_netif_stop(bp, 1);
8876 bnx2x_acquire_phy_lock(bp);
8797 8877
8798 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8878 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8799 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); 8879 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -8805,6 +8885,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8805 rc |= BNX2X_PHY_LOOPBACK_FAILED; 8885 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8806 } 8886 }
8807 8887
8888 bnx2x_release_phy_lock(bp);
8808 bnx2x_netif_start(bp); 8889 bnx2x_netif_start(bp);
8809 8890
8810 return rc; 8891 return rc;
@@ -8878,7 +8959,10 @@ static int bnx2x_test_intr(struct bnx2x *bp)
8878 return -ENODEV; 8959 return -ENODEV;
8879 8960
8880 config->hdr.length_6b = 0; 8961 config->hdr.length_6b = 0;
8881 config->hdr.offset = 0; 8962 if (CHIP_IS_E1(bp))
8963 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8964 else
8965 config->hdr.offset = BP_FUNC(bp);
8882 config->hdr.client_id = BP_CL_ID(bp); 8966 config->hdr.client_id = BP_CL_ID(bp);
8883 config->hdr.reserved1 = 0; 8967 config->hdr.reserved1 = 0;
8884 8968
@@ -9243,6 +9327,18 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9243 return 0; 9327 return 0;
9244} 9328}
9245 9329
9330static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9331{
9332 u16 rx_cons_sb;
9333
9334 /* Tell compiler that status block fields can change */
9335 barrier();
9336 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9337 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9338 rx_cons_sb++;
9339 return (fp->rx_comp_cons != rx_cons_sb);
9340}
9341
9246/* 9342/*
9247 * net_device service functions 9343 * net_device service functions
9248 */ 9344 */
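The new bnx2x_has_rx_work() helper reads the status-block consumer with only a compiler barrier() and then skips the last slot of a completion-queue page, since that slot chains to the next page rather than holding a real completion. The mask trick, shown with an illustrative (not authoritative) page size of 128 entries:

/* Illustration only: assume a 128-entry RCQ page, so MAX_RCQ_DESC_CNT == 127
 * and index 127 of every page is the next-page element, not a completion. */
#define MAX_RCQ_DESC_CNT 127

static unsigned short next_usable_index(unsigned short rx_cons_sb)
{
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;		/* 127 -> 128, 255 -> 256, ... */
	return rx_cons_sb;
}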
@@ -9253,7 +9349,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9253 napi); 9349 napi);
9254 struct bnx2x *bp = fp->bp; 9350 struct bnx2x *bp = fp->bp;
9255 int work_done = 0; 9351 int work_done = 0;
9256 u16 rx_cons_sb;
9257 9352
9258#ifdef BNX2X_STOP_ON_ERROR 9353#ifdef BNX2X_STOP_ON_ERROR
9259 if (unlikely(bp->panic)) 9354 if (unlikely(bp->panic))
@@ -9266,19 +9361,12 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9266 9361
9267 bnx2x_update_fpsb_idx(fp); 9362 bnx2x_update_fpsb_idx(fp);
9268 9363
9269 if (BNX2X_HAS_TX_WORK(fp)) 9364 if (bnx2x_has_tx_work(fp))
9270 bnx2x_tx_int(fp, budget); 9365 bnx2x_tx_int(fp, budget);
9271 9366
9272 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb); 9367 if (bnx2x_has_rx_work(fp))
9273 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9274 rx_cons_sb++;
9275 if (BNX2X_HAS_RX_WORK(fp))
9276 work_done = bnx2x_rx_int(fp, budget); 9368 work_done = bnx2x_rx_int(fp, budget);
9277
9278 rmb(); /* BNX2X_HAS_WORK() reads the status block */ 9369 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9279 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9280 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9281 rx_cons_sb++;
9282 9370
9283 /* must not complete if we consumed full budget */ 9371 /* must not complete if we consumed full budget */
9284 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) { 9372 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9389,6 +9477,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9389 return rc; 9477 return rc;
9390} 9478}
9391 9479
9480#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9392/* check if packet requires linearization (packet is too fragmented) */ 9481/* check if packet requires linearization (packet is too fragmented) */
9393static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, 9482static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9394 u32 xmit_type) 9483 u32 xmit_type)
@@ -9466,6 +9555,7 @@ exit_lbl:
9466 9555
9467 return to_copy; 9556 return to_copy;
9468} 9557}
9558#endif
9469 9559
9470/* called with netif_tx_lock 9560/* called with netif_tx_lock
9471 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 9561 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
@@ -9506,6 +9596,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9506 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 9596 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9507 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 9597 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9508 9598
9599#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9509 /* First, check if we need to linearize the skb 9600 /* First, check if we need to linearize the skb
9510 (due to FW restrictions) */ 9601 (due to FW restrictions) */
9511 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) { 9602 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
@@ -9518,6 +9609,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9518 return NETDEV_TX_OK; 9609 return NETDEV_TX_OK;
9519 } 9610 }
9520 } 9611 }
9612#endif
9521 9613
9522 /* 9614 /*
9523 Please read carefully. First we use one BD which we mark as start, 9615 Please read carefully. First we use one BD which we mark as start,
@@ -9549,11 +9641,14 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9549 "sending pkt %u @%p next_idx %u bd %u @%p\n", 9641 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9550 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd); 9642 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9551 9643
9552 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) { 9644#ifdef BCM_VLAN
9645 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9646 (bp->flags & HW_VLAN_TX_FLAG)) {
9553 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 9647 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9554 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG; 9648 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9555 vlan_off += 4; 9649 vlan_off += 4;
9556 } else 9650 } else
9651#endif
9557 tx_bd->vlan = cpu_to_le16(pkt_prod); 9652 tx_bd->vlan = cpu_to_le16(pkt_prod);
9558 9653
9559 if (xmit_type) { 9654 if (xmit_type) {
@@ -9705,6 +9800,15 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9705 9800
9706 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod); 9801 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9707 9802
9803 /*
9804 * Make sure that the BD data is updated before updating the producer
9805 * since FW might read the BD right after the producer is updated.
9806 * This is only applicable for weak-ordered memory model archs such
9807 * as IA-64. The following barrier is also mandatory since FW will
9808 * assume packets must have BDs.
9809 */
9810 wmb();
9811
9708 fp->hw_tx_prods->bds_prod = 9812 fp->hw_tx_prods->bds_prod =
9709 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd); 9813 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9710 mb(); /* FW restriction: must not reorder writing nbd and packets */ 9814 mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -9718,6 +9822,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9718 dev->trans_start = jiffies; 9822 dev->trans_start = jiffies;
9719 9823
9720 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) { 9824 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9825 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9826 if we put Tx into XOFF state. */
9827 smp_mb();
9721 netif_stop_queue(dev); 9828 netif_stop_queue(dev);
9722 bp->eth_stats.driver_xoff++; 9829 bp->eth_stats.driver_xoff++;
9723 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 9830 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
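The smp_mb() slipped in before netif_stop_queue() publishes the just-updated tx_bd_prod before the stopped state can be observed, so a concurrent bnx2x_tx_int() that sees the queue in XOFF also sees the ring state it needs to decide whether to wake it again. In sketch form (struct my_fp, my_tx_avail() and MAX_NEEDED stand in for the driver's own fastpath, bnx2x_tx_avail() and MAX_SKB_FRAGS + 3):

static void my_xmit_tail(struct my_fp *fp, struct net_device *dev, u16 bd_prod)
{
	fp->tx_bd_prod = bd_prod;		/* producer already advanced */

	if (unlikely(my_tx_avail(fp) < MAX_NEEDED)) {
		/* make tx_bd_prod visible before the queue is seen stopped */
		smp_mb();
		netif_stop_queue(dev);

		/* completions may have freed room in the meantime - recheck */
		if (my_tx_avail(fp) >= MAX_NEEDED)
			netif_wake_queue(dev);
	}
}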
@@ -9733,6 +9840,8 @@ static int bnx2x_open(struct net_device *dev)
9733{ 9840{
9734 struct bnx2x *bp = netdev_priv(dev); 9841 struct bnx2x *bp = netdev_priv(dev);
9735 9842
9843 netif_carrier_off(dev);
9844
9736 bnx2x_set_power_state(bp, PCI_D0); 9845 bnx2x_set_power_state(bp, PCI_D0);
9737 9846
9738 return bnx2x_nic_load(bp, LOAD_OPEN); 9847 return bnx2x_nic_load(bp, LOAD_OPEN);
@@ -9816,7 +9925,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
9816 for (; i < old; i++) { 9925 for (; i < old; i++) {
9817 if (CAM_IS_INVALID(config-> 9926 if (CAM_IS_INVALID(config->
9818 config_table[i])) { 9927 config_table[i])) {
9819 i--; /* already invalidated */ 9928 /* already invalidated */
9820 break; 9929 break;
9821 } 9930 }
9822 /* invalidate */ 9931 /* invalidate */
@@ -9987,6 +10096,16 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
9987 struct bnx2x *bp = netdev_priv(dev); 10096 struct bnx2x *bp = netdev_priv(dev);
9988 10097
9989 bp->vlgrp = vlgrp; 10098 bp->vlgrp = vlgrp;
10099
10100 /* Set flags according to the required capabilities */
10101 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10102
10103 if (dev->features & NETIF_F_HW_VLAN_TX)
10104 bp->flags |= HW_VLAN_TX_FLAG;
10105
10106 if (dev->features & NETIF_F_HW_VLAN_RX)
10107 bp->flags |= HW_VLAN_RX_FLAG;
10108
9990 if (netif_running(dev)) 10109 if (netif_running(dev))
9991 bnx2x_set_client_config(bp); 10110 bnx2x_set_client_config(bp);
9992} 10111}
@@ -10143,6 +10262,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10143 dev->features |= NETIF_F_HIGHDMA; 10262 dev->features |= NETIF_F_HIGHDMA;
10144#ifdef BCM_VLAN 10263#ifdef BCM_VLAN
10145 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 10264 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10265 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10146#endif 10266#endif
10147 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 10267 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10148 dev->features |= NETIF_F_TSO6; 10268 dev->features |= NETIF_F_TSO6;
@@ -10215,22 +10335,18 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10215 return rc; 10335 return rc;
10216 } 10336 }
10217 10337
10218 rc = register_netdev(dev);
10219 if (rc) {
10220 dev_err(&pdev->dev, "Cannot register net device\n");
10221 goto init_one_exit;
10222 }
10223
10224 pci_set_drvdata(pdev, dev); 10338 pci_set_drvdata(pdev, dev);
10225 10339
10226 rc = bnx2x_init_bp(bp); 10340 rc = bnx2x_init_bp(bp);
10341 if (rc)
10342 goto init_one_exit;
10343
10344 rc = register_netdev(dev);
10227 if (rc) { 10345 if (rc) {
10228 unregister_netdev(dev); 10346 dev_err(&pdev->dev, "Cannot register net device\n");
10229 goto init_one_exit; 10347 goto init_one_exit;
10230 } 10348 }
10231 10349
10232 netif_carrier_off(dev);
10233
10234 bp->common.name = board_info[ent->driver_data].name; 10350 bp->common.name = board_info[ent->driver_data].name;
10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," 10351 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10236 " IRQ %d, ", dev->name, bp->common.name, 10352 " IRQ %d, ", dev->name, bp->common.name,
@@ -10378,6 +10494,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10378 bnx2x_free_skbs(bp); 10494 bnx2x_free_skbs(bp);
10379 for_each_queue(bp, i) 10495 for_each_queue(bp, i)
10380 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 10496 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10497 for_each_queue(bp, i)
10498 netif_napi_del(&bnx2x_fp(bp, i, napi));
10381 bnx2x_free_mem(bp); 10499 bnx2x_free_mem(bp);
10382 10500
10383 bp->state = BNX2X_STATE_CLOSED; 10501 bp->state = BNX2X_STATE_CLOSED;
@@ -10519,12 +10637,20 @@ static struct pci_driver bnx2x_pci_driver = {
10519 10637
10520static int __init bnx2x_init(void) 10638static int __init bnx2x_init(void)
10521{ 10639{
10640 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10641 if (bnx2x_wq == NULL) {
10642 printk(KERN_ERR PFX "Cannot create workqueue\n");
10643 return -ENOMEM;
10644 }
10645
10522 return pci_register_driver(&bnx2x_pci_driver); 10646 return pci_register_driver(&bnx2x_pci_driver);
10523} 10647}
10524 10648
10525static void __exit bnx2x_cleanup(void) 10649static void __exit bnx2x_cleanup(void)
10526{ 10650{
10527 pci_unregister_driver(&bnx2x_pci_driver); 10651 pci_unregister_driver(&bnx2x_pci_driver);
10652
10653 destroy_workqueue(bnx2x_wq);
10528} 10654}
10529 10655
10530module_init(bnx2x_init); 10656module_init(bnx2x_init);
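Creating the private workqueue before pci_register_driver() and destroying it only after pci_unregister_driver() keeps the ordering safe: by the time the queue is torn down, every device's remove path has run and nothing can still queue the slow-path task onto it. A reduced sketch with placeholder names (my_wq, my_pci_driver):

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct pci_driver my_pci_driver;		/* id table, probe/remove elsewhere */

static int __init my_init(void)
{
	int rc;

	my_wq = create_singlethread_workqueue("my_drv");
	if (my_wq == NULL)
		return -ENOMEM;

	rc = pci_register_driver(&my_pci_driver);
	if (rc)
		destroy_workqueue(my_wq);	/* don't leak it on failure */
	return rc;
}

static void __exit my_exit(void)
{
	/* all ->remove() callbacks have run after this, so nothing can still
	 * queue work; only then is it safe to destroy the queue */
	pci_unregister_driver(&my_pci_driver);
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);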
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index a67b0c358ae4..d084e5fc4b51 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -1,6 +1,6 @@
1/* bnx2x_reg.h: Broadcom Everest network driver. 1/* bnx2x_reg.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation 3 * Copyright (c) 2007-2009 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 14f9fb3e8795..379a1324db4e 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2104,6 +2104,7 @@ static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
2104{ 2104{
2105 lro_mgr->dev = qs->netdev; 2105 lro_mgr->dev = qs->netdev;
2106 lro_mgr->features = LRO_F_NAPI; 2106 lro_mgr->features = LRO_F_NAPI;
2107 lro_mgr->frag_align_pad = NET_IP_ALIGN;
2107 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; 2108 lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
2108 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2109 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2109 lro_mgr->max_desc = T3_MAX_LRO_SES; 2110 lro_mgr->max_desc = T3_MAX_LRO_SES;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index cf43ee743b3c..0890162953e9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -981,11 +981,15 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
981 ew32(PBA_ECC, reg); 981 ew32(PBA_ECC, reg);
982 } 982 }
983 983
984 /* PCI-Ex Control Register */ 984 /* PCI-Ex Control Registers */
985 if (hw->mac.type == e1000_82574) { 985 if (hw->mac.type == e1000_82574) {
986 reg = er32(GCR); 986 reg = er32(GCR);
987 reg |= (1 << 22); 987 reg |= (1 << 22);
988 ew32(GCR, reg); 988 ew32(GCR, reg);
989
990 reg = er32(GCR2);
991 reg |= 1;
992 ew32(GCR2, reg);
989 } 993 }
990 994
991 return; 995 return;
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index f25e961c6b3b..2d4ce0492df0 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -206,6 +206,7 @@ enum e1e_registers {
206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 206 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 207 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
208 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 208 E1000_GCR = 0x05B00, /* PCI-Ex Control */
209 E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
209 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ 210 E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
210 E1000_SWSM = 0x05B50, /* SW Semaphore */ 211 E1000_SWSM = 0x05B50, /* SW Semaphore */
211 E1000_FWSM = 0x05B54, /* FW Semaphore */ 212 E1000_FWSM = 0x05B54, /* FW Semaphore */
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 4e6a9195fe5f..ce900e54d8d1 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -795,6 +795,7 @@ static int fs_enet_open(struct net_device *dev)
795 795
796 err = fs_init_phy(dev); 796 err = fs_init_phy(dev);
797 if (err) { 797 if (err) {
798 free_irq(fep->interrupt, dev);
798 if (fep->fpi->use_napi) 799 if (fep->fpi->use_napi)
799 napi_disable(&fep->napi); 800 napi_disable(&fep->napi);
800 return err; 801 return err;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index efcbeb6c8673..3f7eab42aef1 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1423,15 +1423,11 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1423{ 1423{
1424 struct gfar_private *priv = netdev_priv(dev); 1424 struct gfar_private *priv = netdev_priv(dev);
1425 unsigned long flags; 1425 unsigned long flags;
1426 struct vlan_group *old_grp;
1427 u32 tempval; 1426 u32 tempval;
1428 1427
1429 spin_lock_irqsave(&priv->rxlock, flags); 1428 spin_lock_irqsave(&priv->rxlock, flags);
1430 1429
1431 old_grp = priv->vlgrp; 1430 priv->vlgrp = grp;
1432
1433 if (old_grp == grp)
1434 return;
1435 1431
1436 if (grp) { 1432 if (grp) {
1437 /* Enable VLAN tag insertion */ 1433 /* Enable VLAN tag insertion */
@@ -1622,10 +1618,18 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1622static void gfar_schedule_cleanup(struct net_device *dev) 1618static void gfar_schedule_cleanup(struct net_device *dev)
1623{ 1619{
1624 struct gfar_private *priv = netdev_priv(dev); 1620 struct gfar_private *priv = netdev_priv(dev);
1621 unsigned long flags;
1622
1623 spin_lock_irqsave(&priv->txlock, flags);
1624 spin_lock(&priv->rxlock);
1625
1625 if (netif_rx_schedule_prep(&priv->napi)) { 1626 if (netif_rx_schedule_prep(&priv->napi)) {
1626 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED); 1627 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1627 __netif_rx_schedule(&priv->napi); 1628 __netif_rx_schedule(&priv->napi);
1628 } 1629 }
1630
1631 spin_unlock(&priv->rxlock);
1632 spin_unlock_irqrestore(&priv->txlock, flags);
1629} 1633}
1630 1634
1631/* Interrupt Handler for Transmit complete */ 1635/* Interrupt Handler for Transmit complete */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index ecf9798987fa..2a2fc17b2878 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -613,7 +613,9 @@ static int __devinit mal_probe(struct of_device *ofdev,
613 INIT_LIST_HEAD(&mal->list); 613 INIT_LIST_HEAD(&mal->list);
614 spin_lock_init(&mal->lock); 614 spin_lock_init(&mal->lock);
615 615
616 netif_napi_add(NULL, &mal->napi, mal_poll, 616 init_dummy_netdev(&mal->dummy_dev);
617
618 netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
617 CONFIG_IBM_NEW_EMAC_POLL_WEIGHT); 619 CONFIG_IBM_NEW_EMAC_POLL_WEIGHT);
618 620
619 /* Load power-on reset defaults */ 621 /* Load power-on reset defaults */
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 2f0a87360844..9ededfbf0726 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -214,6 +214,8 @@ struct mal_instance {
214 int index; 214 int index;
215 spinlock_t lock; 215 spinlock_t lock;
216 216
217 struct net_device dummy_dev;
218
217 unsigned int features; 219 unsigned int features;
218}; 220};
219 221
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 29118f58a141..3a22dc41b656 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1073,7 +1073,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
1073{ 1073{
1074 unsigned int i; 1074 unsigned int i;
1075 int ret; 1075 int ret;
1076 char stir421x_fw_name[11]; 1076 char stir421x_fw_name[12];
1077 const struct firmware *fw; 1077 const struct firmware *fw;
1078 const unsigned char *fw_version_ptr; /* pointer to version string */ 1078 const unsigned char *fw_version_ptr; /* pointer to version string */
1079 unsigned long fw_version = 0; 1079 unsigned long fw_version = 0;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index acef3c65cd2c..d2f4d5f508b7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -318,6 +318,9 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 318 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 319 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 320 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
321 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
322 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
323 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 324 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
322 rx_ring->cpu = cpu; 325 rx_ring->cpu = cpu;
323 } 326 }
@@ -1741,6 +1744,32 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1741 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 1744 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1742} 1745}
1743 1746
1747static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1748{
1749 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1750 struct ixgbe_hw *hw = &adapter->hw;
1751
1752 /* add VID to filter table */
1753 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1754}
1755
1756static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1757{
1758 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1759 struct ixgbe_hw *hw = &adapter->hw;
1760
1761 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1762 ixgbe_irq_disable(adapter);
1763
1764 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1765
1766 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1767 ixgbe_irq_enable(adapter);
1768
1769 /* remove VID from filter table */
1770 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1771}
1772
1744static void ixgbe_vlan_rx_register(struct net_device *netdev, 1773static void ixgbe_vlan_rx_register(struct net_device *netdev,
1745 struct vlan_group *grp) 1774 struct vlan_group *grp)
1746{ 1775{
@@ -1760,6 +1789,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1760 ctrl |= IXGBE_VLNCTRL_VME; 1789 ctrl |= IXGBE_VLNCTRL_VME;
1761 ctrl &= ~IXGBE_VLNCTRL_CFIEN; 1790 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1762 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl); 1791 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1792 ixgbe_vlan_rx_add_vid(netdev, 0);
1763 1793
1764 if (grp) { 1794 if (grp) {
1765 /* enable VLAN tag insert/strip */ 1795 /* enable VLAN tag insert/strip */
@@ -1773,32 +1803,6 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1773 ixgbe_irq_enable(adapter); 1803 ixgbe_irq_enable(adapter);
1774} 1804}
1775 1805
1776static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1777{
1778 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1779 struct ixgbe_hw *hw = &adapter->hw;
1780
1781 /* add VID to filter table */
1782 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1783}
1784
1785static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1786{
1787 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1788 struct ixgbe_hw *hw = &adapter->hw;
1789
1790 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1791 ixgbe_irq_disable(adapter);
1792
1793 vlan_group_set_device(adapter->vlgrp, vid, NULL);
1794
1795 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1796 ixgbe_irq_enable(adapter);
1797
1798 /* remove VID from filter table */
1799 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1800}
1801
1802static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 1806static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1803{ 1807{
1804 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp); 1808 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
@@ -2074,6 +2078,9 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2074 2078
2075 ixgbe_irq_enable(adapter); 2079 ixgbe_irq_enable(adapter);
2076 2080
2081 /* enable transmits */
2082 netif_tx_start_all_queues(netdev);
2083
2077 /* bring the link up in the watchdog, this could race with our first 2084 /* bring the link up in the watchdog, this could race with our first
2078 * link up interrupt but shouldn't be a problem */ 2085 * link up interrupt but shouldn't be a problem */
2079 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 2086 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3475,7 +3482,6 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3475 (FLOW_TX ? "TX" : "None")))); 3482 (FLOW_TX ? "TX" : "None"))));
3476 3483
3477 netif_carrier_on(netdev); 3484 netif_carrier_on(netdev);
3478 netif_tx_wake_all_queues(netdev);
3479 } else { 3485 } else {
3480 /* Force detection of hung controller */ 3486 /* Force detection of hung controller */
3481 adapter->detect_tx_hung = true; 3487 adapter->detect_tx_hung = true;
@@ -3487,7 +3493,6 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3487 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n", 3493 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
3488 netdev->name); 3494 netdev->name);
3489 netif_carrier_off(netdev); 3495 netif_carrier_off(netdev);
3490 netif_tx_stop_all_queues(netdev);
3491 } 3496 }
3492 } 3497 }
3493 3498
@@ -4218,7 +4223,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
4218 } 4223 }
4219 4224
4220 netif_carrier_off(netdev); 4225 netif_carrier_off(netdev);
4221 netif_tx_stop_all_queues(netdev);
4222 4226
4223 strcpy(netdev->name, "eth%d"); 4227 strcpy(netdev->name, "eth%d");
4224 err = register_netdev(netdev); 4228 err = register_netdev(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 83a11ff9ffd1..f011c57c9205 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -404,6 +404,9 @@
404#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 404#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
405#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 405#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
406#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 406#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
407#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
408#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
409#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
407 410
408#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 411#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
409#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 412#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
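
The ixgbe hunks above enable descriptor and header DCA while clearing the three newly defined relaxed-ordering bits in IXGBE_DCA_RXCTRL. A minimal user-space sketch (not driver code) of that read-modify-write; the bit values are the ones from the ixgbe_type.h hunk, the starting register value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define DCA_RXCTRL_DESC_DCA_EN  (1u << 5)
#define DCA_RXCTRL_HEAD_DCA_EN  (1u << 6)
#define DCA_RXCTRL_DESC_RRO_EN  (1u << 9)
#define DCA_RXCTRL_DESC_WRO_EN  (1u << 13)
#define DCA_RXCTRL_DESC_HSRO_EN (1u << 15)

int main(void)
{
	uint32_t rxctrl = 0xffffffffu;	/* pretend every bit was set */

	/* set DCA enables, clear the relaxed-ordering bits, as in the patch */
	rxctrl |= DCA_RXCTRL_DESC_DCA_EN | DCA_RXCTRL_HEAD_DCA_EN;
	rxctrl &= ~(DCA_RXCTRL_DESC_RRO_EN |
		    DCA_RXCTRL_DESC_WRO_EN |
		    DCA_RXCTRL_DESC_HSRO_EN);

	printf("rxctrl = 0x%08x\n", rxctrl);
	return 0;
}
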
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 4a5580c1126a..75010cac76ac 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -84,7 +84,10 @@
84#define KORINA_NUM_RDS 64 /* number of receive descriptors */ 84#define KORINA_NUM_RDS 64 /* number of receive descriptors */
85#define KORINA_NUM_TDS 64 /* number of transmit descriptors */ 85#define KORINA_NUM_TDS 64 /* number of transmit descriptors */
86 86
87#define KORINA_RBSIZE 536 /* size of one resource buffer = Ether MTU */ 87/* KORINA_RBSIZE is the hardware's default maximum receive
88 * frame size in bytes. Having this hardcoded means that there
89 * is no support for MTU sizes greater than 1500. */
90#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */
88#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1) 91#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
89#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1) 92#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
90#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) 93#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
@@ -196,7 +199,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
196 struct korina_private *lp = netdev_priv(dev); 199 struct korina_private *lp = netdev_priv(dev);
197 unsigned long flags; 200 unsigned long flags;
198 u32 length; 201 u32 length;
199 u32 chain_index; 202 u32 chain_prev, chain_next;
200 struct dma_desc *td; 203 struct dma_desc *td;
201 204
202 spin_lock_irqsave(&lp->lock, flags); 205 spin_lock_irqsave(&lp->lock, flags);
@@ -228,8 +231,8 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
228 /* Setup the transmit descriptor. */ 231 /* Setup the transmit descriptor. */
229 dma_cache_inv((u32) td, sizeof(*td)); 232 dma_cache_inv((u32) td, sizeof(*td));
230 td->ca = CPHYSADDR(skb->data); 233 td->ca = CPHYSADDR(skb->data);
231 chain_index = (lp->tx_chain_tail - 1) & 234 chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
232 KORINA_TDS_MASK; 235 chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
233 236
234 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) { 237 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
235 if (lp->tx_chain_status == desc_empty) { 238 if (lp->tx_chain_status == desc_empty) {
@@ -237,7 +240,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
237 td->control = DMA_COUNT(length) | 240 td->control = DMA_COUNT(length) |
238 DMA_DESC_COF | DMA_DESC_IOF; 241 DMA_DESC_COF | DMA_DESC_IOF;
239 /* Move tail */ 242 /* Move tail */
240 lp->tx_chain_tail = chain_index; 243 lp->tx_chain_tail = chain_next;
241 /* Write to NDPTR */ 244 /* Write to NDPTR */
242 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), 245 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
243 &lp->tx_dma_regs->dmandptr); 246 &lp->tx_dma_regs->dmandptr);
@@ -248,12 +251,12 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
248 td->control = DMA_COUNT(length) | 251 td->control = DMA_COUNT(length) |
249 DMA_DESC_COF | DMA_DESC_IOF; 252 DMA_DESC_COF | DMA_DESC_IOF;
250 /* Link to prev */ 253 /* Link to prev */
251 lp->td_ring[chain_index].control &= 254 lp->td_ring[chain_prev].control &=
252 ~DMA_DESC_COF; 255 ~DMA_DESC_COF;
253 /* Link to prev */ 256 /* Link to prev */
254 lp->td_ring[chain_index].link = CPHYSADDR(td); 257 lp->td_ring[chain_prev].link = CPHYSADDR(td);
255 /* Move tail */ 258 /* Move tail */
256 lp->tx_chain_tail = chain_index; 259 lp->tx_chain_tail = chain_next;
257 /* Write to NDPTR */ 260 /* Write to NDPTR */
258 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), 261 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
259 &(lp->tx_dma_regs->dmandptr)); 262 &(lp->tx_dma_regs->dmandptr));
@@ -267,17 +270,16 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
267 td->control = DMA_COUNT(length) | 270 td->control = DMA_COUNT(length) |
268 DMA_DESC_COF | DMA_DESC_IOF; 271 DMA_DESC_COF | DMA_DESC_IOF;
269 /* Move tail */ 272 /* Move tail */
270 lp->tx_chain_tail = chain_index; 273 lp->tx_chain_tail = chain_next;
271 lp->tx_chain_status = desc_filled; 274 lp->tx_chain_status = desc_filled;
272 netif_stop_queue(dev);
273 } else { 275 } else {
274 /* Update tail */ 276 /* Update tail */
275 td->control = DMA_COUNT(length) | 277 td->control = DMA_COUNT(length) |
276 DMA_DESC_COF | DMA_DESC_IOF; 278 DMA_DESC_COF | DMA_DESC_IOF;
277 lp->td_ring[chain_index].control &= 279 lp->td_ring[chain_prev].control &=
278 ~DMA_DESC_COF; 280 ~DMA_DESC_COF;
279 lp->td_ring[chain_index].link = CPHYSADDR(td); 281 lp->td_ring[chain_prev].link = CPHYSADDR(td);
280 lp->tx_chain_tail = chain_index; 282 lp->tx_chain_tail = chain_next;
281 } 283 }
282 } 284 }
283 dma_cache_wback((u32) td, sizeof(*td)); 285 dma_cache_wback((u32) td, sizeof(*td));
@@ -327,13 +329,13 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
327 329
328 dmas = readl(&lp->rx_dma_regs->dmas); 330 dmas = readl(&lp->rx_dma_regs->dmas);
329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) { 331 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
330 netif_rx_schedule_prep(&lp->napi);
331
332 dmasm = readl(&lp->rx_dma_regs->dmasm); 332 dmasm = readl(&lp->rx_dma_regs->dmasm);
333 writel(dmasm | (DMA_STAT_DONE | 333 writel(dmasm | (DMA_STAT_DONE |
334 DMA_STAT_HALT | DMA_STAT_ERR), 334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm); 335 &lp->rx_dma_regs->dmasm);
336 336
337 netif_rx_schedule(&lp->napi);
338
337 if (dmas & DMA_STAT_ERR) 339 if (dmas & DMA_STAT_ERR)
338 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name); 340 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
339 341
@@ -350,15 +352,20 @@ static int korina_rx(struct net_device *dev, int limit)
350 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done]; 352 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
351 struct sk_buff *skb, *skb_new; 353 struct sk_buff *skb, *skb_new;
352 u8 *pkt_buf; 354 u8 *pkt_buf;
353 u32 devcs, pkt_len, dmas, rx_free_desc; 355 u32 devcs, pkt_len, dmas;
354 int count; 356 int count;
355 357
356 dma_cache_inv((u32)rd, sizeof(*rd)); 358 dma_cache_inv((u32)rd, sizeof(*rd));
357 359
358 for (count = 0; count < limit; count++) { 360 for (count = 0; count < limit; count++) {
361 skb = lp->rx_skb[lp->rx_next_done];
362 skb_new = NULL;
359 363
360 devcs = rd->devcs; 364 devcs = rd->devcs;
361 365
366 if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
367 break;
368
362 /* Update statistics counters */ 369 /* Update statistics counters */
363 if (devcs & ETH_RX_CRC) 370 if (devcs & ETH_RX_CRC)
364 dev->stats.rx_crc_errors++; 371 dev->stats.rx_crc_errors++;
@@ -381,63 +388,58 @@ static int korina_rx(struct net_device *dev, int limit)
381 * in Rc32434 (errata ref #077) */ 388 * in Rc32434 (errata ref #077) */
382 dev->stats.rx_errors++; 389 dev->stats.rx_errors++;
383 dev->stats.rx_dropped++; 390 dev->stats.rx_dropped++;
384 } 391 } else if ((devcs & ETH_RX_ROK)) {
385
386 while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
387 /* init the var. used for the later
388 * operations within the while loop */
389 skb_new = NULL;
390 pkt_len = RCVPKT_LENGTH(devcs); 392 pkt_len = RCVPKT_LENGTH(devcs);
391 skb = lp->rx_skb[lp->rx_next_done]; 393
392 394 /* must be the (first and) last
393 if ((devcs & ETH_RX_ROK)) { 395 * descriptor then */
394 /* must be the (first and) last 396 pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
395 * descriptor then */ 397
396 pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; 398 /* invalidate the cache */
397 399 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
398 /* invalidate the cache */ 400
399 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); 401 /* Malloc up new buffer. */
400 402 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
401 /* Malloc up new buffer. */ 403
402 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); 404 if (!skb_new)
403 405 break;
404 if (!skb_new) 406 /* Do not count the CRC */
405 break; 407 skb_put(skb, pkt_len - 4);
406 /* Do not count the CRC */ 408 skb->protocol = eth_type_trans(skb, dev);
407 skb_put(skb, pkt_len - 4); 409
408 skb->protocol = eth_type_trans(skb, dev); 410 /* Pass the packet to upper layers */
409 411 netif_receive_skb(skb);
410 /* Pass the packet to upper layers */ 412 dev->stats.rx_packets++;
411 netif_receive_skb(skb); 413 dev->stats.rx_bytes += pkt_len;
412 dev->stats.rx_packets++; 414
413 dev->stats.rx_bytes += pkt_len; 415 /* Update the mcast stats */
414 416 if (devcs & ETH_RX_MP)
415 /* Update the mcast stats */ 417 dev->stats.multicast++;
416 if (devcs & ETH_RX_MP) 418
417 dev->stats.multicast++; 419 /* 16 bit align */
418 420 skb_reserve(skb_new, 2);
419 lp->rx_skb[lp->rx_next_done] = skb_new; 421
420 } 422 lp->rx_skb[lp->rx_next_done] = skb_new;
421
422 rd->devcs = 0;
423
424 /* Restore descriptor's curr_addr */
425 if (skb_new)
426 rd->ca = CPHYSADDR(skb_new->data);
427 else
428 rd->ca = CPHYSADDR(skb->data);
429
430 rd->control = DMA_COUNT(KORINA_RBSIZE) |
431 DMA_DESC_COD | DMA_DESC_IOD;
432 lp->rd_ring[(lp->rx_next_done - 1) &
433 KORINA_RDS_MASK].control &=
434 ~DMA_DESC_COD;
435
436 lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
437 dma_cache_wback((u32)rd, sizeof(*rd));
438 rd = &lp->rd_ring[lp->rx_next_done];
439 writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
440 } 423 }
424
425 rd->devcs = 0;
426
427 /* Restore descriptor's curr_addr */
428 if (skb_new)
429 rd->ca = CPHYSADDR(skb_new->data);
430 else
431 rd->ca = CPHYSADDR(skb->data);
432
433 rd->control = DMA_COUNT(KORINA_RBSIZE) |
434 DMA_DESC_COD | DMA_DESC_IOD;
435 lp->rd_ring[(lp->rx_next_done - 1) &
436 KORINA_RDS_MASK].control &=
437 ~DMA_DESC_COD;
438
439 lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
440 dma_cache_wback((u32)rd, sizeof(*rd));
441 rd = &lp->rd_ring[lp->rx_next_done];
442 writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
441 } 443 }
442 444
443 dmas = readl(&lp->rx_dma_regs->dmas); 445 dmas = readl(&lp->rx_dma_regs->dmas);
@@ -623,12 +625,12 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
623 dmas = readl(&lp->tx_dma_regs->dmas); 625 dmas = readl(&lp->tx_dma_regs->dmas);
624 626
625 if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) { 627 if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
626 korina_tx(dev);
627
628 dmasm = readl(&lp->tx_dma_regs->dmasm); 628 dmasm = readl(&lp->tx_dma_regs->dmasm);
629 writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR), 629 writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
630 &lp->tx_dma_regs->dmasm); 630 &lp->tx_dma_regs->dmasm);
631 631
632 korina_tx(dev);
633
632 if (lp->tx_chain_status == desc_filled && 634 if (lp->tx_chain_status == desc_filled &&
633 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) { 635 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
634 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]), 636 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
@@ -741,6 +743,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
741static void korina_alloc_ring(struct net_device *dev) 743static void korina_alloc_ring(struct net_device *dev)
742{ 744{
743 struct korina_private *lp = netdev_priv(dev); 745 struct korina_private *lp = netdev_priv(dev);
746 struct sk_buff *skb;
744 int i; 747 int i;
745 748
746 /* Initialize the transmit descriptors */ 749 /* Initialize the transmit descriptors */
@@ -756,8 +759,6 @@ static void korina_alloc_ring(struct net_device *dev)
756 759
757 /* Initialize the receive descriptors */ 760 /* Initialize the receive descriptors */
758 for (i = 0; i < KORINA_NUM_RDS; i++) { 761 for (i = 0; i < KORINA_NUM_RDS; i++) {
759 struct sk_buff *skb = lp->rx_skb[i];
760
761 skb = dev_alloc_skb(KORINA_RBSIZE + 2); 762 skb = dev_alloc_skb(KORINA_RBSIZE + 2);
762 if (!skb) 763 if (!skb)
763 break; 764 break;
@@ -770,11 +771,12 @@ static void korina_alloc_ring(struct net_device *dev)
770 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]); 771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
771 } 772 }
772 773
773 /* loop back */ 774 /* loop back receive descriptors, so the last
774 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]); 775 * descriptor points to the first one */
775 lp->rx_next_done = 0; 776 lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
777 lp->rd_ring[i - 1].control |= DMA_DESC_COD;
776 778
777 lp->rd_ring[i].control |= DMA_DESC_COD; 779 lp->rx_next_done = 0;
778 lp->rx_chain_head = 0; 780 lp->rx_chain_head = 0;
779 lp->rx_chain_tail = 0; 781 lp->rx_chain_tail = 0;
780 lp->rx_chain_status = desc_empty; 782 lp->rx_chain_status = desc_empty;
@@ -901,6 +903,8 @@ static int korina_restart(struct net_device *dev)
901 903
902 korina_free_ring(dev); 904 korina_free_ring(dev);
903 905
906 napi_disable(&lp->napi);
907
904 ret = korina_init(dev); 908 ret = korina_init(dev);
905 if (ret < 0) { 909 if (ret < 0) {
906 printk(KERN_ERR DRV_NAME "%s: cannot restart device\n", 910 printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
@@ -999,14 +1003,14 @@ static int korina_open(struct net_device *dev)
999 * that handles the Done Finished 1003 * that handles the Done Finished
1000 * Ovr and Und Events */ 1004 * Ovr and Und Events */
1001 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt, 1005 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
1002 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev); 1006 IRQF_DISABLED, "Korina ethernet Rx", dev);
1003 if (ret < 0) { 1007 if (ret < 0) {
1004 printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n", 1008 printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
1005 dev->name, lp->rx_irq); 1009 dev->name, lp->rx_irq);
1006 goto err_release; 1010 goto err_release;
1007 } 1011 }
1008 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt, 1012 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
1009 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev); 1013 IRQF_DISABLED, "Korina ethernet Tx", dev);
1010 if (ret < 0) { 1014 if (ret < 0) {
1011 printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n", 1015 printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
1012 dev->name, lp->tx_irq); 1016 dev->name, lp->tx_irq);
@@ -1015,7 +1019,7 @@ static int korina_open(struct net_device *dev)
1015 1019
1016 /* Install handler for overrun error. */ 1020 /* Install handler for overrun error. */
1017 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt, 1021 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
1018 IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev); 1022 IRQF_DISABLED, "Ethernet Overflow", dev);
1019 if (ret < 0) { 1023 if (ret < 0) {
1020 printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n", 1024 printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
1021 dev->name, lp->ovr_irq); 1025 dev->name, lp->ovr_irq);
@@ -1024,7 +1028,7 @@ static int korina_open(struct net_device *dev)
1024 1028
1025 /* Install handler for underflow error. */ 1029 /* Install handler for underflow error. */
1026 ret = request_irq(lp->und_irq, &korina_und_interrupt, 1030 ret = request_irq(lp->und_irq, &korina_und_interrupt,
1027 IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev); 1031 IRQF_DISABLED, "Ethernet Underflow", dev);
1028 if (ret < 0) { 1032 if (ret < 0) {
1029 printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n", 1033 printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
1030 dev->name, lp->und_irq); 1034 dev->name, lp->und_irq);
@@ -1067,6 +1071,8 @@ static int korina_close(struct net_device *dev)
1067 1071
1068 korina_free_ring(dev); 1072 korina_free_ring(dev);
1069 1073
1074 napi_disable(&lp->napi);
1075
1070 free_irq(lp->rx_irq, dev); 1076 free_irq(lp->rx_irq, dev);
1071 free_irq(lp->tx_irq, dev); 1077 free_irq(lp->tx_irq, dev);
1072 free_irq(lp->ovr_irq, dev); 1078 free_irq(lp->ovr_irq, dev);
@@ -1089,7 +1095,6 @@ static int korina_probe(struct platform_device *pdev)
1089 return -ENOMEM; 1095 return -ENOMEM;
1090 } 1096 }
1091 SET_NETDEV_DEV(dev, &pdev->dev); 1097 SET_NETDEV_DEV(dev, &pdev->dev);
1092 platform_set_drvdata(pdev, dev);
1093 lp = netdev_priv(dev); 1098 lp = netdev_priv(dev);
1094 1099
1095 bif->dev = dev; 1100 bif->dev = dev;
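
The korina_send_packet() fix splits the single chain_index into chain_prev (the descriptor whose link and COF bit are updated) and chain_next (the new tail), both wrapped with the power-of-two ring mask. A small stand-alone sketch of that wrap-around arithmetic; the loop step is arbitrary:

#include <stdio.h>

#define KORINA_NUM_TDS	64
#define KORINA_TDS_MASK	(KORINA_NUM_TDS - 1)

int main(void)
{
	unsigned int tail;

	/* with a power-of-two ring size, "& MASK" is a cheap modulo,
	 * and (0 - 1) & MASK correctly wraps back to the last slot */
	for (tail = 0; tail < KORINA_NUM_TDS; tail += 21) {
		unsigned int chain_prev = (tail - 1) & KORINA_TDS_MASK;
		unsigned int chain_next = (tail + 1) & KORINA_TDS_MASK;

		printf("tail=%2u prev=%2u next=%2u\n", tail, chain_prev, chain_next);
	}
	return 0;
}
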
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index a04da4ecaa88..f6c4936e2fa8 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -321,6 +321,10 @@ static void macb_tx(struct macb *bp)
321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n", 321 printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
322 bp->dev->name); 322 bp->dev->name);
323 323
324 /* Transfer ongoing, disable transmitter, to avoid confusion */
325 if (status & MACB_BIT(TGO))
326 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
327
324 head = bp->tx_head; 328 head = bp->tx_head;
325 329
326 /*Mark all the buffer as used to avoid sending a lost buffer*/ 330 /*Mark all the buffer as used to avoid sending a lost buffer*/
@@ -343,6 +347,10 @@ static void macb_tx(struct macb *bp)
343 } 347 }
344 348
345 bp->tx_head = bp->tx_tail = 0; 349 bp->tx_head = bp->tx_tail = 0;
350
351 /* Enable the transmitter again */
352 if (status & MACB_BIT(TGO))
353 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
346 } 354 }
347 355
348 if (!(status & MACB_BIT(COMP))) 356 if (!(status & MACB_BIT(COMP)))
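
The macb_tx() underrun path now parks the transmitter before rewinding the ring and re-enables it afterwards, but only if a transfer was actually ongoing (TGO set). A toy sketch of that quiesce/restore shape; the bit positions below are placeholders, not the real MACB register layout:

#include <stdio.h>
#include <stdint.h>

#define NCR_TE	(1u << 3)	/* illustrative "transmit enable" bit */
#define TSR_TGO	(1u << 4)	/* illustrative "transfer ongoing" bit */

int main(void)
{
	uint32_t ncr = NCR_TE;		/* transmitter currently enabled */
	uint32_t status = TSR_TGO;	/* underrun reported mid-transfer */

	/* quiesce only if something was in flight */
	if (status & TSR_TGO)
		ncr &= ~NCR_TE;

	/* ... reset tx_head/tx_tail, mark buffers used ... */

	/* restore the transmitter afterwards */
	if (status & TSR_TGO)
		ncr |= NCR_TE;

	printf("ncr = 0x%08x\n", ncr);
	return 0;
}
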
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 919fb9eb1b62..cebdf3243ca1 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
111 dev_cap->reserved_eqs + 111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1); 112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 113 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 115 profile[MLX4_RES_MTT].num = request->num_mtt;
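
The kernel's min() is strict about its operands having the same type, which is why the profile code switches to min_t() and forces both sides to unsigned before comparing. A rough user-space model of that helper; the operand values here are invented:

#include <stdio.h>

/* simplified stand-in for the kernel's min_t(): cast both sides first */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	int max_eqs = 512;			/* think: dev_cap->max_eqs */
	unsigned long wanted = 16 + 4 + 1;	/* reserved_eqs + CPUs + 1, made up */

	unsigned int num = min_t(unsigned, max_eqs, wanted);

	printf("MLX4_RES_EQ num = %u\n", num);
	return 0;
}
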
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 7253a499d9c8..5f31bbb614af 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -136,21 +136,23 @@ static char mv643xx_eth_driver_version[] = "1.4";
136/* 136/*
137 * SDMA configuration register. 137 * SDMA configuration register.
138 */ 138 */
139#define RX_BURST_SIZE_4_64BIT (2 << 1)
139#define RX_BURST_SIZE_16_64BIT (4 << 1) 140#define RX_BURST_SIZE_16_64BIT (4 << 1)
140#define BLM_RX_NO_SWAP (1 << 4) 141#define BLM_RX_NO_SWAP (1 << 4)
141#define BLM_TX_NO_SWAP (1 << 5) 142#define BLM_TX_NO_SWAP (1 << 5)
143#define TX_BURST_SIZE_4_64BIT (2 << 22)
142#define TX_BURST_SIZE_16_64BIT (4 << 22) 144#define TX_BURST_SIZE_16_64BIT (4 << 22)
143 145
144#if defined(__BIG_ENDIAN) 146#if defined(__BIG_ENDIAN)
145#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 147#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
146 (RX_BURST_SIZE_16_64BIT | \ 148 (RX_BURST_SIZE_4_64BIT | \
147 TX_BURST_SIZE_16_64BIT) 149 TX_BURST_SIZE_4_64BIT)
148#elif defined(__LITTLE_ENDIAN) 150#elif defined(__LITTLE_ENDIAN)
149#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 151#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
150 (RX_BURST_SIZE_16_64BIT | \ 152 (RX_BURST_SIZE_4_64BIT | \
151 BLM_RX_NO_SWAP | \ 153 BLM_RX_NO_SWAP | \
152 BLM_TX_NO_SWAP | \ 154 BLM_TX_NO_SWAP | \
153 TX_BURST_SIZE_16_64BIT) 155 TX_BURST_SIZE_4_64BIT)
154#else 156#else
155#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 157#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
156#endif 158#endif
@@ -1594,7 +1596,7 @@ oom:
1594 entry = addr_crc(a); 1596 entry = addr_crc(a);
1595 } 1597 }
1596 1598
1597 table[entry >> 2] |= 1 << (entry & 3); 1599 table[entry >> 2] |= 1 << (8 * (entry & 3));
1598 } 1600 }
1599 1601
1600 for (i = 0; i < 0x100; i += 4) { 1602 for (i = 0; i < 0x100; i += 4) {
@@ -2210,6 +2212,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
2210 struct mv643xx_eth_private *mp = netdev_priv(dev); 2212 struct mv643xx_eth_private *mp = netdev_priv(dev);
2211 int i; 2213 int i;
2212 2214
2215 wrlp(mp, INT_MASK_EXT, 0x00000000);
2213 wrlp(mp, INT_MASK, 0x00000000); 2216 wrlp(mp, INT_MASK, 0x00000000);
2214 rdlp(mp, INT_MASK); 2217 rdlp(mp, INT_MASK);
2215 2218
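
The mv643xx_eth filter fix matters because each 32-bit table register holds four one-byte entries, so the enable bit has to land in the matching byte lane; the old "1 << (entry & 3)" always hit the low byte. A runnable sketch of the corrected indexing, with a made-up hash value standing in for addr_crc():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t table[0x100 / 4] = { 0 };
	unsigned int entry = 0x43;	/* stand-in for the addr_crc() result */

	/* word index selects the register, low two bits select the byte lane */
	table[entry >> 2] |= 1u << (8 * (entry & 3));

	printf("table[0x%02x] = 0x%08x\n", entry >> 2, table[entry >> 2]);
	return 0;
}
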
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 6bb71b687f7b..e9c1296b267e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -1,7 +1,7 @@
1/************************************************************************* 1/*************************************************************************
2 * myri10ge.c: Myricom Myri-10G Ethernet driver. 2 * myri10ge.c: Myricom Myri-10G Ethernet driver.
3 * 3 *
4 * Copyright (C) 2005 - 2007 Myricom, Inc. 4 * Copyright (C) 2005 - 2009 Myricom, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.4-1.398" 78#define MYRI10GE_VERSION_STR "1.4.4-1.401"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -3786,7 +3786,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3786 if (status != 0) { 3786 if (status != 0) {
3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n", 3787 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
3788 status); 3788 status);
3789 goto abort_with_netdev; 3789 goto abort_with_enabled;
3790 } 3790 }
3791 3791
3792 pci_set_master(pdev); 3792 pci_set_master(pdev);
@@ -3801,13 +3801,13 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3801 } 3801 }
3802 if (status != 0) { 3802 if (status != 0) {
3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3803 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3804 goto abort_with_netdev; 3804 goto abort_with_enabled;
3805 } 3805 }
3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3806 (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3807 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3808 &mgp->cmd_bus, GFP_KERNEL); 3808 &mgp->cmd_bus, GFP_KERNEL);
3809 if (mgp->cmd == NULL) 3809 if (mgp->cmd == NULL)
3810 goto abort_with_netdev; 3810 goto abort_with_enabled;
3811 3811
3812 mgp->board_span = pci_resource_len(pdev, 0); 3812 mgp->board_span = pci_resource_len(pdev, 0);
3813 mgp->iomem_base = pci_resource_start(pdev, 0); 3813 mgp->iomem_base = pci_resource_start(pdev, 0);
@@ -3943,8 +3943,10 @@ abort_with_mtrr:
3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3943 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
3944 mgp->cmd, mgp->cmd_bus); 3944 mgp->cmd, mgp->cmd_bus);
3945 3945
3946abort_with_netdev: 3946abort_with_enabled:
3947 pci_disable_device(pdev);
3947 3948
3949abort_with_netdev:
3948 free_netdev(netdev); 3950 free_netdev(netdev);
3949 return status; 3951 return status;
3950} 3952}
@@ -3990,6 +3992,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
3990 mgp->cmd, mgp->cmd_bus); 3992 mgp->cmd, mgp->cmd_bus);
3991 3993
3992 free_netdev(netdev); 3994 free_netdev(netdev);
3995 pci_disable_device(pdev);
3993 pci_set_drvdata(pdev, NULL); 3996 pci_set_drvdata(pdev, NULL);
3994} 3997}
3995 3998
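
The myri10ge probe error paths used to jump straight to abort_with_netdev and leave the PCI device enabled; the new abort_with_enabled level unwinds that too before freeing the netdev. A toy model of the layered goto cleanup (the function names are stand-ins, not the driver's own):

#include <stdio.h>

static int enable_device(void) { return 0; }	/* think: pci_enable_device() */
static int alloc_cmd(void)     { return -1; }	/* pretend this later step fails */
static void disable_device(void) { puts("pci_disable_device"); }
static void free_netdev_stub(void) { puts("free_netdev"); }

int main(void)
{
	int err;

	err = enable_device();
	if (err)
		goto abort_with_netdev;

	err = alloc_cmd();
	if (err)
		goto abort_with_enabled;	/* the extra level this patch adds */

	return 0;

abort_with_enabled:
	disable_device();
abort_with_netdev:
	free_netdev_stub();
	return err;
}
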
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index f8e601c51da7..a75a31005fd3 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -146,7 +146,7 @@
146 146
147#define MAX_RX_BUFFER_LENGTH 1760 147#define MAX_RX_BUFFER_LENGTH 1760
148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062 148#define MAX_RX_JUMBO_BUFFER_LENGTH 8062
149#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512) 149#define MAX_RX_LRO_BUFFER_LENGTH (8062)
150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2) 150#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2)
151#define RX_JUMBO_DMA_MAP_LEN \ 151#define RX_JUMBO_DMA_MAP_LEN \
152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2) 152 (MAX_RX_JUMBO_BUFFER_LENGTH - 2)
@@ -207,11 +207,11 @@
207 207
208#define MAX_CMD_DESCRIPTORS 4096 208#define MAX_CMD_DESCRIPTORS 4096
209#define MAX_RCV_DESCRIPTORS 16384 209#define MAX_RCV_DESCRIPTORS 16384
210#define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) 210#define MAX_CMD_DESCRIPTORS_HOST 1024
211#define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) 211#define MAX_RCV_DESCRIPTORS_1G 2048
212#define MAX_RCV_DESCRIPTORS_10G 8192 212#define MAX_RCV_DESCRIPTORS_10G 4096
213#define MAX_JUMBO_RCV_DESCRIPTORS 1024 213#define MAX_JUMBO_RCV_DESCRIPTORS 512
214#define MAX_LRO_RCV_DESCRIPTORS 64 214#define MAX_LRO_RCV_DESCRIPTORS 8
215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 215#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 216#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS 217#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
@@ -308,27 +308,16 @@ struct netxen_ring_ctx {
308#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \ 308#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
309 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0)) 309 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
310 310
311#define netxen_set_cmd_desc_flags(cmd_desc, val) \ 311#define netxen_set_tx_port(_desc, _port) \
312 (cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \ 312 (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
313 ~cpu_to_le16(0x7f)) | cpu_to_le16((val) & 0x7f) 313
314#define netxen_set_cmd_desc_opcode(cmd_desc, val) \ 314#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
315 (cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \ 315 (_desc)->flags_opcode = \
316 ~cpu_to_le16((u16)0x3f << 7)) | cpu_to_le16(((val) & 0x3f) << 7) 316 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
317 317
318#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val) \ 318#define netxen_set_tx_frags_len(_desc, _frags, _len) \
319 (cmd_desc)->num_of_buffers_total_length = \ 319 (_desc)->num_of_buffers_total_length = \
320 ((cmd_desc)->num_of_buffers_total_length & \ 320 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
321 ~cpu_to_le32(0xff)) | cpu_to_le32((val) & 0xff)
322#define netxen_set_cmd_desc_totallength(cmd_desc, val) \
323 (cmd_desc)->num_of_buffers_total_length = \
324 ((cmd_desc)->num_of_buffers_total_length & \
325 ~cpu_to_le32((u32)0xffffff << 8)) | \
326 cpu_to_le32(((val) & 0xffffff) << 8)
327
328#define netxen_get_cmd_desc_opcode(cmd_desc) \
329 ((le16_to_cpu((cmd_desc)->flags_opcode) >> 7) & 0x003f)
330#define netxen_get_cmd_desc_totallength(cmd_desc) \
331 ((le32_to_cpu((cmd_desc)->num_of_buffers_total_length) >> 8) & 0xffffff)
332 321
333struct cmd_desc_type0 { 322struct cmd_desc_type0 {
334 u8 tcp_hdr_offset; /* For LSO only */ 323 u8 tcp_hdr_offset; /* For LSO only */
@@ -510,7 +499,8 @@ typedef enum {
510 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a, 499 NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a,
511 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b, 500 NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b,
512 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 501 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031,
513 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 502 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032,
503 NETXEN_BRDTYPE_P3_10G_TP = 0x0080
514 504
515} netxen_brdtype_t; 505} netxen_brdtype_t;
516 506
@@ -757,7 +747,7 @@ extern char netxen_nic_driver_name[];
757 */ 747 */
758struct netxen_skb_frag { 748struct netxen_skb_frag {
759 u64 dma; 749 u64 dma;
760 u32 length; 750 ulong length;
761}; 751};
762 752
763#define _netxen_set_bits(config_word, start, bits, val) {\ 753#define _netxen_set_bits(config_word, start, bits, val) {\
@@ -783,13 +773,7 @@ struct netxen_skb_frag {
783struct netxen_cmd_buffer { 773struct netxen_cmd_buffer {
784 struct sk_buff *skb; 774 struct sk_buff *skb;
785 struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 775 struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
786 u32 total_length; 776 u32 frag_count;
787 u32 mss;
788 u16 port;
789 u8 cmd;
790 u8 frag_count;
791 unsigned long time_stamp;
792 u32 state;
793}; 777};
794 778
795/* In rx_buffer, we do not need multiple fragments as is a single buffer */ 779/* In rx_buffer, we do not need multiple fragments as is a single buffer */
@@ -876,7 +860,6 @@ struct nx_host_rds_ring {
876 u32 skb_size; 860 u32 skb_size;
877 struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */ 861 struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */
878 struct list_head free_list; 862 struct list_head free_list;
879 int begin_alloc;
880}; 863};
881 864
882/* 865/*
@@ -995,31 +978,31 @@ struct netxen_recv_context {
995 */ 978 */
996 979
997typedef struct { 980typedef struct {
998 u64 host_phys_addr; /* Ring base addr */ 981 __le64 host_phys_addr; /* Ring base addr */
999 u32 ring_size; /* Ring entries */ 982 __le32 ring_size; /* Ring entries */
1000 u16 msi_index; 983 __le16 msi_index;
1001 u16 rsvd; /* Padding */ 984 __le16 rsvd; /* Padding */
1002} nx_hostrq_sds_ring_t; 985} nx_hostrq_sds_ring_t;
1003 986
1004typedef struct { 987typedef struct {
1005 u64 host_phys_addr; /* Ring base addr */ 988 __le64 host_phys_addr; /* Ring base addr */
1006 u64 buff_size; /* Packet buffer size */ 989 __le64 buff_size; /* Packet buffer size */
1007 u32 ring_size; /* Ring entries */ 990 __le32 ring_size; /* Ring entries */
1008 u32 ring_kind; /* Class of ring */ 991 __le32 ring_kind; /* Class of ring */
1009} nx_hostrq_rds_ring_t; 992} nx_hostrq_rds_ring_t;
1010 993
1011typedef struct { 994typedef struct {
1012 u64 host_rsp_dma_addr; /* Response dma'd here */ 995 __le64 host_rsp_dma_addr; /* Response dma'd here */
1013 u32 capabilities[4]; /* Flag bit vector */ 996 __le32 capabilities[4]; /* Flag bit vector */
1014 u32 host_int_crb_mode; /* Interrupt crb usage */ 997 __le32 host_int_crb_mode; /* Interrupt crb usage */
1015 u32 host_rds_crb_mode; /* RDS crb usage */ 998 __le32 host_rds_crb_mode; /* RDS crb usage */
1016 /* These ring offsets are relative to data[0] below */ 999 /* These ring offsets are relative to data[0] below */
1017 u32 rds_ring_offset; /* Offset to RDS config */ 1000 __le32 rds_ring_offset; /* Offset to RDS config */
1018 u32 sds_ring_offset; /* Offset to SDS config */ 1001 __le32 sds_ring_offset; /* Offset to SDS config */
1019 u16 num_rds_rings; /* Count of RDS rings */ 1002 __le16 num_rds_rings; /* Count of RDS rings */
1020 u16 num_sds_rings; /* Count of SDS rings */ 1003 __le16 num_sds_rings; /* Count of SDS rings */
1021 u16 rsvd1; /* Padding */ 1004 __le16 rsvd1; /* Padding */
1022 u16 rsvd2; /* Padding */ 1005 __le16 rsvd2; /* Padding */
1023 u8 reserved[128]; /* reserve space for future expansion*/ 1006 u8 reserved[128]; /* reserve space for future expansion*/
1024 /* MUST BE 64-bit aligned. 1007 /* MUST BE 64-bit aligned.
1025 The following is packed: 1008 The following is packed:
@@ -1029,24 +1012,24 @@ typedef struct {
1029} nx_hostrq_rx_ctx_t; 1012} nx_hostrq_rx_ctx_t;
1030 1013
1031typedef struct { 1014typedef struct {
1032 u32 host_producer_crb; /* Crb to use */ 1015 __le32 host_producer_crb; /* Crb to use */
1033 u32 rsvd1; /* Padding */ 1016 __le32 rsvd1; /* Padding */
1034} nx_cardrsp_rds_ring_t; 1017} nx_cardrsp_rds_ring_t;
1035 1018
1036typedef struct { 1019typedef struct {
1037 u32 host_consumer_crb; /* Crb to use */ 1020 __le32 host_consumer_crb; /* Crb to use */
1038 u32 interrupt_crb; /* Crb to use */ 1021 __le32 interrupt_crb; /* Crb to use */
1039} nx_cardrsp_sds_ring_t; 1022} nx_cardrsp_sds_ring_t;
1040 1023
1041typedef struct { 1024typedef struct {
1042 /* These ring offsets are relative to data[0] below */ 1025 /* These ring offsets are relative to data[0] below */
1043 u32 rds_ring_offset; /* Offset to RDS config */ 1026 __le32 rds_ring_offset; /* Offset to RDS config */
1044 u32 sds_ring_offset; /* Offset to SDS config */ 1027 __le32 sds_ring_offset; /* Offset to SDS config */
1045 u32 host_ctx_state; /* Starting State */ 1028 __le32 host_ctx_state; /* Starting State */
1046 u32 num_fn_per_port; /* How many PCI fn share the port */ 1029 __le32 num_fn_per_port; /* How many PCI fn share the port */
1047 u16 num_rds_rings; /* Count of RDS rings */ 1030 __le16 num_rds_rings; /* Count of RDS rings */
1048 u16 num_sds_rings; /* Count of SDS rings */ 1031 __le16 num_sds_rings; /* Count of SDS rings */
1049 u16 context_id; /* Handle for context */ 1032 __le16 context_id; /* Handle for context */
1050 u8 phys_port; /* Physical id of port */ 1033 u8 phys_port; /* Physical id of port */
1051 u8 virt_port; /* Virtual/Logical id of port */ 1034 u8 virt_port; /* Virtual/Logical id of port */
1052 u8 reserved[128]; /* save space for future expansion */ 1035 u8 reserved[128]; /* save space for future expansion */
@@ -1072,34 +1055,34 @@ typedef struct {
1072 */ 1055 */
1073 1056
1074typedef struct { 1057typedef struct {
1075 u64 host_phys_addr; /* Ring base addr */ 1058 __le64 host_phys_addr; /* Ring base addr */
1076 u32 ring_size; /* Ring entries */ 1059 __le32 ring_size; /* Ring entries */
1077 u32 rsvd; /* Padding */ 1060 __le32 rsvd; /* Padding */
1078} nx_hostrq_cds_ring_t; 1061} nx_hostrq_cds_ring_t;
1079 1062
1080typedef struct { 1063typedef struct {
1081 u64 host_rsp_dma_addr; /* Response dma'd here */ 1064 __le64 host_rsp_dma_addr; /* Response dma'd here */
1082 u64 cmd_cons_dma_addr; /* */ 1065 __le64 cmd_cons_dma_addr; /* */
1083 u64 dummy_dma_addr; /* */ 1066 __le64 dummy_dma_addr; /* */
1084 u32 capabilities[4]; /* Flag bit vector */ 1067 __le32 capabilities[4]; /* Flag bit vector */
1085 u32 host_int_crb_mode; /* Interrupt crb usage */ 1068 __le32 host_int_crb_mode; /* Interrupt crb usage */
1086 u32 rsvd1; /* Padding */ 1069 __le32 rsvd1; /* Padding */
1087 u16 rsvd2; /* Padding */ 1070 __le16 rsvd2; /* Padding */
1088 u16 interrupt_ctl; 1071 __le16 interrupt_ctl;
1089 u16 msi_index; 1072 __le16 msi_index;
1090 u16 rsvd3; /* Padding */ 1073 __le16 rsvd3; /* Padding */
1091 nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */ 1074 nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
1092 u8 reserved[128]; /* future expansion */ 1075 u8 reserved[128]; /* future expansion */
1093} nx_hostrq_tx_ctx_t; 1076} nx_hostrq_tx_ctx_t;
1094 1077
1095typedef struct { 1078typedef struct {
1096 u32 host_producer_crb; /* Crb to use */ 1079 __le32 host_producer_crb; /* Crb to use */
1097 u32 interrupt_crb; /* Crb to use */ 1080 __le32 interrupt_crb; /* Crb to use */
1098} nx_cardrsp_cds_ring_t; 1081} nx_cardrsp_cds_ring_t;
1099 1082
1100typedef struct { 1083typedef struct {
1101 u32 host_ctx_state; /* Starting state */ 1084 __le32 host_ctx_state; /* Starting state */
1102 u16 context_id; /* Handle for context */ 1085 __le16 context_id; /* Handle for context */
1103 u8 phys_port; /* Physical id of port */ 1086 u8 phys_port; /* Physical id of port */
1104 u8 virt_port; /* Virtual/Logical id of port */ 1087 u8 virt_port; /* Virtual/Logical id of port */
1105 nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */ 1088 nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
@@ -1202,9 +1185,9 @@ enum {
1202#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ 1185#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
1203 1186
1204typedef struct { 1187typedef struct {
1205 u64 qhdr; 1188 __le64 qhdr;
1206 u64 req_hdr; 1189 __le64 req_hdr;
1207 u64 words[6]; 1190 __le64 words[6];
1208} nx_nic_req_t; 1191} nx_nic_req_t;
1209 1192
1210typedef struct { 1193typedef struct {
@@ -1486,8 +1469,6 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter);
1486 1469
1487void netxen_initialize_adapter_ops(struct netxen_adapter *adapter); 1470void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
1488int netxen_init_firmware(struct netxen_adapter *adapter); 1471int netxen_init_firmware(struct netxen_adapter *adapter);
1489void netxen_tso_check(struct netxen_adapter *adapter,
1490 struct cmd_desc_type0 *desc, struct sk_buff *skb);
1491void netxen_nic_clear_stats(struct netxen_adapter *adapter); 1472void netxen_nic_clear_stats(struct netxen_adapter *adapter);
1492void netxen_watchdog_task(struct work_struct *work); 1473void netxen_watchdog_task(struct work_struct *work);
1493void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, 1474void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
@@ -1496,6 +1477,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter);
1496u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1477u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
1497void netxen_p2_nic_set_multi(struct net_device *netdev); 1478void netxen_p2_nic_set_multi(struct net_device *netdev);
1498void netxen_p3_nic_set_multi(struct net_device *netdev); 1479void netxen_p3_nic_set_multi(struct net_device *netdev);
1480void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
1499int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); 1481int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32);
1500int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1482int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
1501 1483
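
The __le16/__le32/__le64 annotations above record that these request/response structures are read by the card in little-endian layout, so every store goes through cpu_to_le*() and every load through le*_to_cpu(); on little-endian hosts those are no-ops, on big-endian hosts they swap. A plain C sketch of the byte order cpu_to_le32() guarantees, independent of the host:

#include <stdio.h>
#include <stdint.h>

/* emit a 32-bit value least-significant byte first, i.e. little-endian */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t wire[4];

	put_le32(wire, 16384);	/* e.g. a ring_size field */
	printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
	return 0;
}
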
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 64b51643c626..746bdb470418 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -76,7 +76,7 @@ netxen_api_unlock(struct netxen_adapter *adapter)
76static u32 76static u32
77netxen_poll_rsp(struct netxen_adapter *adapter) 77netxen_poll_rsp(struct netxen_adapter *adapter)
78{ 78{
79 u32 raw_rsp, rsp = NX_CDRP_RSP_OK; 79 u32 rsp = NX_CDRP_RSP_OK;
80 int timeout = 0; 80 int timeout = 0;
81 81
82 do { 82 do {
@@ -86,10 +86,7 @@ netxen_poll_rsp(struct netxen_adapter *adapter)
86 if (++timeout > NX_OS_CRB_RETRY_COUNT) 86 if (++timeout > NX_OS_CRB_RETRY_COUNT)
87 return NX_CDRP_RSP_TIMEOUT; 87 return NX_CDRP_RSP_TIMEOUT;
88 88
89 netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, 89 netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET, &rsp);
90 &raw_rsp);
91
92 rsp = le32_to_cpu(raw_rsp);
93 } while (!NX_CDRP_IS_RSP(rsp)); 90 } while (!NX_CDRP_IS_RSP(rsp));
94 91
95 return rsp; 92 return rsp;
@@ -109,20 +106,16 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
109 if (netxen_api_lock(adapter)) 106 if (netxen_api_lock(adapter))
110 return NX_RCODE_TIMEOUT; 107 return NX_RCODE_TIMEOUT;
111 108
112 netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, 109 netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET, signature);
113 cpu_to_le32(signature));
114 110
115 netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, 111 netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET, arg1);
116 cpu_to_le32(arg1));
117 112
118 netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, 113 netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET, arg2);
119 cpu_to_le32(arg2));
120 114
121 netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, 115 netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET, arg3);
122 cpu_to_le32(arg3));
123 116
124 netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET, 117 netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
125 cpu_to_le32(NX_CDRP_FORM_CMD(cmd))); 118 NX_CDRP_FORM_CMD(cmd));
126 119
127 rsp = netxen_poll_rsp(adapter); 120 rsp = netxen_poll_rsp(adapter);
128 121
@@ -133,7 +126,6 @@ netxen_issue_cmd(struct netxen_adapter *adapter,
133 rcode = NX_RCODE_TIMEOUT; 126 rcode = NX_RCODE_TIMEOUT;
134 } else if (rsp == NX_CDRP_RSP_FAIL) { 127 } else if (rsp == NX_CDRP_RSP_FAIL) {
135 netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode); 128 netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
136 rcode = le32_to_cpu(rcode);
137 129
138 printk(KERN_ERR "%s: failed card response code:0x%x\n", 130 printk(KERN_ERR "%s: failed card response code:0x%x\n",
139 netxen_nic_driver_name, rcode); 131 netxen_nic_driver_name, rcode);
@@ -183,7 +175,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
183 175
184 int i, nrds_rings, nsds_rings; 176 int i, nrds_rings, nsds_rings;
185 size_t rq_size, rsp_size; 177 size_t rq_size, rsp_size;
186 u32 cap, reg; 178 u32 cap, reg, val;
187 179
188 int err; 180 int err;
189 181
@@ -225,11 +217,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
225 217
226 prq->num_rds_rings = cpu_to_le16(nrds_rings); 218 prq->num_rds_rings = cpu_to_le16(nrds_rings);
227 prq->num_sds_rings = cpu_to_le16(nsds_rings); 219 prq->num_sds_rings = cpu_to_le16(nsds_rings);
228 prq->rds_ring_offset = 0; 220 prq->rds_ring_offset = cpu_to_le32(0);
229 prq->sds_ring_offset = prq->rds_ring_offset + 221
222 val = le32_to_cpu(prq->rds_ring_offset) +
230 (sizeof(nx_hostrq_rds_ring_t) * nrds_rings); 223 (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
224 prq->sds_ring_offset = cpu_to_le32(val);
231 225
232 prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset); 226 prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
227 le32_to_cpu(prq->rds_ring_offset));
233 228
234 for (i = 0; i < nrds_rings; i++) { 229 for (i = 0; i < nrds_rings; i++) {
235 230
@@ -241,17 +236,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
241 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size); 236 prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
242 } 237 }
243 238
244 prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset); 239 prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
240 le32_to_cpu(prq->sds_ring_offset));
245 241
246 prq_sds[0].host_phys_addr = 242 prq_sds[0].host_phys_addr =
247 cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); 243 cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
248 prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count); 244 prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
249 /* only one msix vector for now */ 245 /* only one msix vector for now */
250 prq_sds[0].msi_index = cpu_to_le32(0); 246 prq_sds[0].msi_index = cpu_to_le16(0);
251
252 /* now byteswap offsets */
253 prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
254 prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
255 247
256 phys_addr = hostrq_phys_addr; 248 phys_addr = hostrq_phys_addr;
257 err = netxen_issue_cmd(adapter, 249 err = netxen_issue_cmd(adapter,
@@ -269,9 +261,9 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
269 261
270 262
271 prsp_rds = ((nx_cardrsp_rds_ring_t *) 263 prsp_rds = ((nx_cardrsp_rds_ring_t *)
272 &prsp->data[prsp->rds_ring_offset]); 264 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
273 265
274 for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) { 266 for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
275 rds_ring = &recv_ctx->rds_rings[i]; 267 rds_ring = &recv_ctx->rds_rings[i];
276 268
277 reg = le32_to_cpu(prsp_rds[i].host_producer_crb); 269 reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
@@ -279,7 +271,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
279 } 271 }
280 272
281 prsp_sds = ((nx_cardrsp_sds_ring_t *) 273 prsp_sds = ((nx_cardrsp_sds_ring_t *)
282 &prsp->data[prsp->sds_ring_offset]); 274 &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
283 reg = le32_to_cpu(prsp_sds[0].host_consumer_crb); 275 reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
284 recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200); 276 recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
285 277
@@ -288,7 +280,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
288 280
289 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); 281 recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
290 recv_ctx->context_id = le16_to_cpu(prsp->context_id); 282 recv_ctx->context_id = le16_to_cpu(prsp->context_id);
291 recv_ctx->virt_port = le16_to_cpu(prsp->virt_port); 283 recv_ctx->virt_port = prsp->virt_port;
292 284
293out_free_rsp: 285out_free_rsp:
294 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr); 286 pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index e45ce2951729..0894a7be0225 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -136,11 +136,9 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
136 136
137 ecmd->port = PORT_TP; 137 ecmd->port = PORT_TP;
138 138
139 if (netif_running(dev)) { 139 ecmd->speed = adapter->link_speed;
140 ecmd->speed = adapter->link_speed; 140 ecmd->duplex = adapter->link_duplex;
141 ecmd->duplex = adapter->link_duplex; 141 ecmd->autoneg = adapter->link_autoneg;
142 ecmd->autoneg = adapter->link_autoneg;
143 }
144 142
145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 143 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
146 u32 val; 144 u32 val;
@@ -171,7 +169,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
171 } else 169 } else
172 return -EIO; 170 return -EIO;
173 171
174 ecmd->phy_address = adapter->portnum; 172 ecmd->phy_address = adapter->physical_port;
175 ecmd->transceiver = XCVR_EXTERNAL; 173 ecmd->transceiver = XCVR_EXTERNAL;
176 174
177 switch ((netxen_brdtype_t) boardinfo->board_type) { 175 switch ((netxen_brdtype_t) boardinfo->board_type) {
@@ -180,13 +178,13 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
180 case NETXEN_BRDTYPE_P3_REF_QG: 178 case NETXEN_BRDTYPE_P3_REF_QG:
181 case NETXEN_BRDTYPE_P3_4_GB: 179 case NETXEN_BRDTYPE_P3_4_GB:
182 case NETXEN_BRDTYPE_P3_4_GB_MM: 180 case NETXEN_BRDTYPE_P3_4_GB_MM:
183 case NETXEN_BRDTYPE_P3_10000_BASE_T:
184 181
185 ecmd->supported |= SUPPORTED_Autoneg; 182 ecmd->supported |= SUPPORTED_Autoneg;
186 ecmd->advertising |= ADVERTISED_Autoneg; 183 ecmd->advertising |= ADVERTISED_Autoneg;
187 case NETXEN_BRDTYPE_P2_SB31_10G_CX4: 184 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
188 case NETXEN_BRDTYPE_P3_10G_CX4: 185 case NETXEN_BRDTYPE_P3_10G_CX4:
189 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 186 case NETXEN_BRDTYPE_P3_10G_CX4_LP:
187 case NETXEN_BRDTYPE_P3_10000_BASE_T:
190 ecmd->supported |= SUPPORTED_TP; 188 ecmd->supported |= SUPPORTED_TP;
191 ecmd->advertising |= ADVERTISED_TP; 189 ecmd->advertising |= ADVERTISED_TP;
192 ecmd->port = PORT_TP; 190 ecmd->port = PORT_TP;
@@ -204,16 +202,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
204 ecmd->port = PORT_FIBRE; 202 ecmd->port = PORT_FIBRE;
205 ecmd->autoneg = AUTONEG_DISABLE; 203 ecmd->autoneg = AUTONEG_DISABLE;
206 break; 204 break;
207 case NETXEN_BRDTYPE_P2_SB31_10G:
208 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 205 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
209 case NETXEN_BRDTYPE_P3_10G_SFP_CT: 206 case NETXEN_BRDTYPE_P3_10G_SFP_CT:
210 case NETXEN_BRDTYPE_P3_10G_SFP_QT: 207 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
208 ecmd->advertising |= ADVERTISED_TP;
209 ecmd->supported |= SUPPORTED_TP;
210 case NETXEN_BRDTYPE_P2_SB31_10G:
211 case NETXEN_BRDTYPE_P3_10G_XFP: 211 case NETXEN_BRDTYPE_P3_10G_XFP:
212 ecmd->supported |= SUPPORTED_FIBRE; 212 ecmd->supported |= SUPPORTED_FIBRE;
213 ecmd->advertising |= ADVERTISED_FIBRE; 213 ecmd->advertising |= ADVERTISED_FIBRE;
214 ecmd->port = PORT_FIBRE; 214 ecmd->port = PORT_FIBRE;
215 ecmd->autoneg = AUTONEG_DISABLE; 215 ecmd->autoneg = AUTONEG_DISABLE;
216 break; 216 break;
217 case NETXEN_BRDTYPE_P3_10G_TP:
218 if (adapter->ahw.board_type == NETXEN_NIC_XGBE) {
219 ecmd->autoneg = AUTONEG_DISABLE;
220 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
221 ecmd->advertising |=
222 (ADVERTISED_FIBRE | ADVERTISED_TP);
223 ecmd->port = PORT_FIBRE;
224 } else {
225 ecmd->autoneg = AUTONEG_ENABLE;
226 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg);
227 ecmd->advertising |=
228 (ADVERTISED_TP | ADVERTISED_Autoneg);
229 ecmd->port = PORT_TP;
230 }
231 break;
217 default: 232 default:
218 printk(KERN_ERR "netxen-nic: Unsupported board model %d\n", 233 printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
219 (netxen_brdtype_t) boardinfo->board_type); 234 (netxen_brdtype_t) boardinfo->board_type);
@@ -546,7 +561,10 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
546 } 561 }
547 ring->tx_pending = adapter->max_tx_desc_count; 562 ring->tx_pending = adapter->max_tx_desc_count;
548 563
549 ring->rx_max_pending = MAX_RCV_DESCRIPTORS; 564 if (adapter->ahw.board_type == NETXEN_NIC_GBE)
565 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
566 else
567 ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
550 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST; 568 ring->tx_max_pending = MAX_CMD_DESCRIPTORS_HOST;
551 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS; 569 ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS;
552 ring->rx_mini_max_pending = 0; 570 ring->rx_mini_max_pending = 0;
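
The board-type switch in netxen_nic_get_settings() relies on deliberate fallthrough: the copper gigabit cases add Autoneg and then drop into the shared TP cases, which is why NETXEN_BRDTYPE_P3_10000_BASE_T moves below the Autoneg block. A reduced sketch of that accumulate-by-fallthrough idiom; the enum members and bit values here are placeholders:

#include <stdio.h>

enum board { BRD_P3_4_GB, BRD_P3_10G_CX4 };	/* illustrative subset */

#define SUP_AUTONEG	(1u << 0)
#define SUP_TP		(1u << 1)

int main(void)
{
	unsigned int supported = 0;
	enum board b = BRD_P3_4_GB;

	switch (b) {
	case BRD_P3_4_GB:
		supported |= SUP_AUTONEG;	/* copper 1G boards autonegotiate */
		/* fall through */
	case BRD_P3_10G_CX4:
		supported |= SUP_TP;		/* shared with the copper 10G cases */
		break;
	}

	printf("supported = 0x%x\n", supported);
	return 0;
}
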
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index aa6e603bfcbf..821cff68b3f3 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -503,17 +503,15 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
503 503
504 i = 0; 504 i = 0;
505 505
506 netif_tx_lock_bh(adapter->netdev);
507
506 producer = adapter->cmd_producer; 508 producer = adapter->cmd_producer;
507 do { 509 do {
508 cmd_desc = &cmd_desc_arr[i]; 510 cmd_desc = &cmd_desc_arr[i];
509 511
510 pbuf = &adapter->cmd_buf_arr[producer]; 512 pbuf = &adapter->cmd_buf_arr[producer];
511 pbuf->mss = 0;
512 pbuf->total_length = 0;
513 pbuf->skb = NULL; 513 pbuf->skb = NULL;
514 pbuf->cmd = 0;
515 pbuf->frag_count = 0; 514 pbuf->frag_count = 0;
516 pbuf->port = 0;
517 515
518 /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */ 516 /* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
519 memcpy(&adapter->ahw.cmd_desc_head[producer], 517 memcpy(&adapter->ahw.cmd_desc_head[producer],
@@ -531,6 +529,8 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
531 529
532 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer); 530 netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
533 531
532 netif_tx_unlock_bh(adapter->netdev);
533
534 return 0; 534 return 0;
535} 535}
536 536
@@ -539,16 +539,19 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev,
539{ 539{
540 struct netxen_adapter *adapter = netdev_priv(dev); 540 struct netxen_adapter *adapter = netdev_priv(dev);
541 nx_nic_req_t req; 541 nx_nic_req_t req;
542 nx_mac_req_t mac_req; 542 nx_mac_req_t *mac_req;
543 u64 word;
543 int rv; 544 int rv;
544 545
545 memset(&req, 0, sizeof(nx_nic_req_t)); 546 memset(&req, 0, sizeof(nx_nic_req_t));
546 req.qhdr |= (NX_NIC_REQUEST << 23); 547 req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
547 req.req_hdr |= NX_MAC_EVENT; 548
548 req.req_hdr |= ((u64)adapter->portnum << 16); 549 word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
549 mac_req.op = op; 550 req.req_hdr = cpu_to_le64(word);
550 memcpy(&mac_req.mac_addr, addr, 6); 551
551 req.words[0] = cpu_to_le64(*(u64 *)&mac_req); 552 mac_req = (nx_mac_req_t *)&req.words[0];
553 mac_req->op = op;
554 memcpy(mac_req->mac_addr, addr, 6);
552 555
553 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 556 rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
554 if (rv != 0) { 557 if (rv != 0) {
@@ -612,18 +615,35 @@ send_fw_cmd:
612int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 615int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
613{ 616{
614 nx_nic_req_t req; 617 nx_nic_req_t req;
618 u64 word;
615 619
616 memset(&req, 0, sizeof(nx_nic_req_t)); 620 memset(&req, 0, sizeof(nx_nic_req_t));
617 621
618 req.qhdr |= (NX_HOST_REQUEST << 23); 622 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
619 req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE; 623
620 req.req_hdr |= ((u64)adapter->portnum << 16); 624 word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
625 ((u64)adapter->portnum << 16);
626 req.req_hdr = cpu_to_le64(word);
627
621 req.words[0] = cpu_to_le64(mode); 628 req.words[0] = cpu_to_le64(mode);
622 629
623 return netxen_send_cmd_descs(adapter, 630 return netxen_send_cmd_descs(adapter,
624 (struct cmd_desc_type0 *)&req, 1); 631 (struct cmd_desc_type0 *)&req, 1);
625} 632}
626 633
634void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
635{
636 nx_mac_list_t *cur, *next;
637
638 cur = adapter->mac_list;
639
640 while (cur) {
641 next = cur->next;
642 kfree(cur);
643 cur = next;
644 }
645}
646
627#define NETXEN_CONFIG_INTR_COALESCE 3 647#define NETXEN_CONFIG_INTR_COALESCE 3
628 648
629/* 649/*
@@ -632,13 +652,15 @@ int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
632int netxen_config_intr_coalesce(struct netxen_adapter *adapter) 652int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
633{ 653{
634 nx_nic_req_t req; 654 nx_nic_req_t req;
655 u64 word;
635 int rv; 656 int rv;
636 657
637 memset(&req, 0, sizeof(nx_nic_req_t)); 658 memset(&req, 0, sizeof(nx_nic_req_t));
638 659
639 req.qhdr |= (NX_NIC_REQUEST << 23); 660 req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
640 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 661
641 req.req_hdr |= ((u64)adapter->portnum << 16); 662 word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
663 req.req_hdr = cpu_to_le64(word);
642 664
643 memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal)); 665 memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
644 666
@@ -772,13 +794,10 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
772 adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4); 794 adapter->hw_read_wx(adapter, crbaddr, &mac_lo, 4);
773 adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4); 795 adapter->hw_read_wx(adapter, crbaddr+4, &mac_hi, 4);
774 796
775 mac_hi = cpu_to_le32(mac_hi);
776 mac_lo = cpu_to_le32(mac_lo);
777
778 if (pci_func & 1) 797 if (pci_func & 1)
779 *mac = ((mac_lo >> 16) | ((u64)mac_hi << 16)); 798 *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
780 else 799 else
781 *mac = ((mac_lo) | ((u64)mac_hi << 32)); 800 *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
782 801
783 return 0; 802 return 0;
784} 803}
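The removed cpu_to_le32() calls byte-swapped each half before the shifts, which only gives the intended result on little-endian hosts; the replacement combines the two 32-bit CRB words in host order and performs a single conversion when storing the result. The combining step, as a sketch (the helper name is illustrative, not the driver's):

	/* Sketch: fold two 32-bit CRB words into one 64-bit MAC value.
	 * PCI function parity selects which bits of the pair belong
	 * to this port. */
	static u64 combine_mac(u32 mac_lo, u32 mac_hi, int pci_func)
	{
		if (pci_func & 1)
			return (mac_lo >> 16) | ((u64)mac_hi << 16);

		return (u64)mac_lo | ((u64)mac_hi << 32);
	}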
@@ -937,7 +956,7 @@ int netxen_load_firmware(struct netxen_adapter *adapter)
937{ 956{
938 int i; 957 int i;
939 u32 data, size = 0; 958 u32 data, size = 0;
940 u32 flashaddr = NETXEN_BOOTLD_START, memaddr = NETXEN_BOOTLD_START; 959 u32 flashaddr = NETXEN_BOOTLD_START;
941 960
942 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4; 961 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START)/4;
943 962
@@ -949,10 +968,8 @@ int netxen_load_firmware(struct netxen_adapter *adapter)
949 if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0) 968 if (netxen_rom_fast_read(adapter, flashaddr, (int *)&data) != 0)
950 return -EIO; 969 return -EIO;
951 970
952 adapter->pci_mem_write(adapter, memaddr, &data, 4); 971 adapter->pci_mem_write(adapter, flashaddr, &data, 4);
953 flashaddr += 4; 972 flashaddr += 4;
954 memaddr += 4;
955 cond_resched();
956 } 973 }
957 msleep(1); 974 msleep(1);
958 975
@@ -2034,7 +2051,13 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2034 rv = -1; 2051 rv = -1;
2035 } 2052 }
2036 2053
2037 DPRINTK(INFO, "Discovered board type:0x%x ", boardinfo->board_type); 2054 if (boardinfo->board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
2055 u32 gpio = netxen_nic_reg_read(adapter,
2056 NETXEN_ROMUSB_GLB_PAD_GPIO_I);
2057 if ((gpio & 0x8000) == 0)
2058 boardinfo->board_type = NETXEN_BRDTYPE_P3_10G_TP;
2059 }
2060
2038 switch ((netxen_brdtype_t) boardinfo->board_type) { 2061 switch ((netxen_brdtype_t) boardinfo->board_type) {
2039 case NETXEN_BRDTYPE_P2_SB35_4G: 2062 case NETXEN_BRDTYPE_P2_SB35_4G:
2040 adapter->ahw.board_type = NETXEN_NIC_GBE; 2063 adapter->ahw.board_type = NETXEN_NIC_GBE;
@@ -2053,7 +2076,6 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2053 case NETXEN_BRDTYPE_P3_10G_SFP_QT: 2076 case NETXEN_BRDTYPE_P3_10G_SFP_QT:
2054 case NETXEN_BRDTYPE_P3_10G_XFP: 2077 case NETXEN_BRDTYPE_P3_10G_XFP:
2055 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2078 case NETXEN_BRDTYPE_P3_10000_BASE_T:
2056
2057 adapter->ahw.board_type = NETXEN_NIC_XGBE; 2079 adapter->ahw.board_type = NETXEN_NIC_XGBE;
2058 break; 2080 break;
2059 case NETXEN_BRDTYPE_P1_BD: 2081 case NETXEN_BRDTYPE_P1_BD:
@@ -2063,9 +2085,12 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter)
2063 case NETXEN_BRDTYPE_P3_REF_QG: 2085 case NETXEN_BRDTYPE_P3_REF_QG:
2064 case NETXEN_BRDTYPE_P3_4_GB: 2086 case NETXEN_BRDTYPE_P3_4_GB:
2065 case NETXEN_BRDTYPE_P3_4_GB_MM: 2087 case NETXEN_BRDTYPE_P3_4_GB_MM:
2066
2067 adapter->ahw.board_type = NETXEN_NIC_GBE; 2088 adapter->ahw.board_type = NETXEN_NIC_GBE;
2068 break; 2089 break;
2090 case NETXEN_BRDTYPE_P3_10G_TP:
2091 adapter->ahw.board_type = (adapter->portnum < 2) ?
2092 NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
2093 break;
2069 default: 2094 default:
2070 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2095 printk("%s: Unknown(%x)\n", netxen_nic_driver_name,
2071 boardinfo->board_type); 2096 boardinfo->board_type);
@@ -2110,12 +2135,16 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2110{ 2135{
2111 __u32 status; 2136 __u32 status;
2112 __u32 autoneg; 2137 __u32 autoneg;
2113 __u32 mode;
2114 __u32 port_mode; 2138 __u32 port_mode;
2115 2139
2116 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2140 if (!netif_carrier_ok(adapter->netdev)) {
2117 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2141 adapter->link_speed = 0;
2142 adapter->link_duplex = -1;
2143 adapter->link_autoneg = AUTONEG_ENABLE;
2144 return;
2145 }
2118 2146
2147 if (adapter->ahw.board_type == NETXEN_NIC_GBE) {
2119 adapter->hw_read_wx(adapter, 2148 adapter->hw_read_wx(adapter,
2120 NETXEN_PORT_MODE_ADDR, &port_mode, 4); 2149 NETXEN_PORT_MODE_ADDR, &port_mode, 4);
2121 if (port_mode == NETXEN_PORT_MODE_802_3_AP) { 2150 if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
@@ -2141,7 +2170,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2141 adapter->link_speed = SPEED_1000; 2170 adapter->link_speed = SPEED_1000;
2142 break; 2171 break;
2143 default: 2172 default:
2144 adapter->link_speed = -1; 2173 adapter->link_speed = 0;
2145 break; 2174 break;
2146 } 2175 }
2147 switch (netxen_get_phy_duplex(status)) { 2176 switch (netxen_get_phy_duplex(status)) {
@@ -2164,7 +2193,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
2164 goto link_down; 2193 goto link_down;
2165 } else { 2194 } else {
2166 link_down: 2195 link_down:
2167 adapter->link_speed = -1; 2196 adapter->link_speed = 0;
2168 adapter->link_duplex = -1; 2197 adapter->link_duplex = -1;
2169 } 2198 }
2170 } 2199 }
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index d924468e506e..ca7c8d8050c9 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -308,7 +308,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
308 } 308 }
309 memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE); 309 memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
310 INIT_LIST_HEAD(&rds_ring->free_list); 310 INIT_LIST_HEAD(&rds_ring->free_list);
311 rds_ring->begin_alloc = 0;
312 /* 311 /*
313 * Now go through all of them, set reference handles 312 * Now go through all of them, set reference handles
314 * and put them in the queues. 313 * and put them in the queues.
@@ -439,6 +438,8 @@ static int netxen_wait_rom_done(struct netxen_adapter *adapter)
439 long timeout = 0; 438 long timeout = 0;
440 long done = 0; 439 long done = 0;
441 440
441 cond_resched();
442
442 while (done == 0) { 443 while (done == 0) {
443 done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS); 444 done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
444 done &= 2; 445 done &= 2;
@@ -533,12 +534,9 @@ static int do_rom_fast_write(struct netxen_adapter *adapter, int addr,
533static int do_rom_fast_read(struct netxen_adapter *adapter, 534static int do_rom_fast_read(struct netxen_adapter *adapter,
534 int addr, int *valp) 535 int addr, int *valp)
535{ 536{
536 cond_resched();
537
538 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr); 537 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
539 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
540 udelay(100); /* prevent bursting on CRB */
541 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 538 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
539 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
542 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb); 540 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
543 if (netxen_wait_rom_done(adapter)) { 541 if (netxen_wait_rom_done(adapter)) {
544 printk("Error waiting for rom done\n"); 542 printk("Error waiting for rom done\n");
@@ -546,7 +544,7 @@ static int do_rom_fast_read(struct netxen_adapter *adapter,
546 } 544 }
547 /* reset abyte_cnt and dummy_byte_cnt */ 545 /* reset abyte_cnt and dummy_byte_cnt */
548 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0); 546 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
549 udelay(100); /* prevent bursting on CRB */ 547 udelay(10);
550 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0); 548 netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
551 549
552 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA); 550 *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
@@ -884,14 +882,16 @@ int netxen_flash_unlock(struct netxen_adapter *adapter)
884int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) 882int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
885{ 883{
886 int addr, val; 884 int addr, val;
887 int i, init_delay = 0; 885 int i, n, init_delay = 0;
888 struct crb_addr_pair *buf; 886 struct crb_addr_pair *buf;
889 unsigned offset, n; 887 unsigned offset;
890 u32 off; 888 u32 off;
891 889
892 /* resetall */ 890 /* resetall */
891 rom_lock(adapter);
893 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 892 netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
894 0xffffffff); 893 0xffffffff);
894 netxen_rom_unlock(adapter);
895 895
896 if (verbose) { 896 if (verbose) {
897 if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0) 897 if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
@@ -910,7 +910,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
910 910
911 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 911 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
912 if (netxen_rom_fast_read(adapter, 0, &n) != 0 || 912 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
913 (n != 0xcafecafeUL) || 913 (n != 0xcafecafe) ||
914 netxen_rom_fast_read(adapter, 4, &n) != 0) { 914 netxen_rom_fast_read(adapter, 4, &n) != 0) {
915 printk(KERN_ERR "%s: ERROR Reading crb_init area: " 915 printk(KERN_ERR "%s: ERROR Reading crb_init area: "
916 "n: %08x\n", netxen_nic_driver_name, n); 916 "n: %08x\n", netxen_nic_driver_name, n);
@@ -975,6 +975,14 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
975 /* do not reset PCI */ 975 /* do not reset PCI */
976 if (off == (ROMUSB_GLB + 0xbc)) 976 if (off == (ROMUSB_GLB + 0xbc))
977 continue; 977 continue;
978 if (off == (ROMUSB_GLB + 0xa8))
979 continue;
980 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
981 continue;
982 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
983 continue;
984 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
985 continue;
978 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18)) 986 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
979 buf[i].data = 0x1020; 987 buf[i].data = 0x1020;
980 /* skip the function enable register */ 988 /* skip the function enable register */
@@ -992,23 +1000,21 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
992 continue; 1000 continue;
993 } 1001 }
994 1002
1003 init_delay = 1;
995 /* After writing this register, HW needs time for CRB */ 1004 /* After writing this register, HW needs time for CRB */
996 /* to quiet down (else crb_window returns 0xffffffff) */ 1005 /* to quiet down (else crb_window returns 0xffffffff) */
997 if (off == NETXEN_ROMUSB_GLB_SW_RESET) { 1006 if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
998 init_delay = 1; 1007 init_delay = 1000;
999 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1008 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1000 /* hold xdma in reset also */ 1009 /* hold xdma in reset also */
1001 buf[i].data = NETXEN_NIC_XDMA_RESET; 1010 buf[i].data = NETXEN_NIC_XDMA_RESET;
1011 buf[i].data = 0x8000ff;
1002 } 1012 }
1003 } 1013 }
1004 1014
1005 adapter->hw_write_wx(adapter, off, &buf[i].data, 4); 1015 adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
1006 1016
1007 if (init_delay == 1) { 1017 msleep(init_delay);
1008 msleep(1000);
1009 init_delay = 0;
1010 }
1011 msleep(1);
1012 } 1018 }
1013 kfree(buf); 1019 kfree(buf);
1014 1020
@@ -1277,7 +1283,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
1277 1283
1278 dev_kfree_skb_any(skb); 1284 dev_kfree_skb_any(skb);
1279 for (i = 0; i < nr_frags; i++) { 1285 for (i = 0; i < nr_frags; i++) {
1280 index = frag_desc->frag_handles[i]; 1286 index = le16_to_cpu(frag_desc->frag_handles[i]);
1281 skb = netxen_process_rxbuf(adapter, 1287 skb = netxen_process_rxbuf(adapter,
1282 rds_ring, index, cksum); 1288 rds_ring, index, cksum);
1283 if (skb) 1289 if (skb)
@@ -1428,7 +1434,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1428 struct rcv_desc *pdesc; 1434 struct rcv_desc *pdesc;
1429 struct netxen_rx_buffer *buffer; 1435 struct netxen_rx_buffer *buffer;
1430 int count = 0; 1436 int count = 0;
1431 int index = 0;
1432 netxen_ctx_msg msg = 0; 1437 netxen_ctx_msg msg = 0;
1433 dma_addr_t dma; 1438 dma_addr_t dma;
1434 struct list_head *head; 1439 struct list_head *head;
@@ -1436,7 +1441,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1436 rds_ring = &recv_ctx->rds_rings[ringid]; 1441 rds_ring = &recv_ctx->rds_rings[ringid];
1437 1442
1438 producer = rds_ring->producer; 1443 producer = rds_ring->producer;
1439 index = rds_ring->begin_alloc;
1440 head = &rds_ring->free_list; 1444 head = &rds_ring->free_list;
1441 1445
1442 /* We can start writing rx descriptors into the phantom memory. */ 1446 /* We can start writing rx descriptors into the phantom memory. */
@@ -1444,39 +1448,37 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1444 1448
1445 skb = dev_alloc_skb(rds_ring->skb_size); 1449 skb = dev_alloc_skb(rds_ring->skb_size);
1446 if (unlikely(!skb)) { 1450 if (unlikely(!skb)) {
1447 rds_ring->begin_alloc = index;
1448 break; 1451 break;
1449 } 1452 }
1450 1453
1454 if (!adapter->ahw.cut_through)
1455 skb_reserve(skb, 2);
1456
1457 dma = pci_map_single(pdev, skb->data,
1458 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1459 if (pci_dma_mapping_error(pdev, dma)) {
1460 dev_kfree_skb_any(skb);
1461 break;
1462 }
1463
1464 count++;
1451 buffer = list_entry(head->next, struct netxen_rx_buffer, list); 1465 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1452 list_del(&buffer->list); 1466 list_del(&buffer->list);
1453 1467
1454 count++; /* now there should be no failure */
1455 pdesc = &rds_ring->desc_head[producer];
1456
1457 if (!adapter->ahw.cut_through)
1458 skb_reserve(skb, 2);
1459 /* This will be setup when we receive the
1460 * buffer after it has been filled FSL TBD TBD
1461 * skb->dev = netdev;
1462 */
1463 dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
1464 PCI_DMA_FROMDEVICE);
1465 pdesc->addr_buffer = cpu_to_le64(dma);
1466 buffer->skb = skb; 1468 buffer->skb = skb;
1467 buffer->state = NETXEN_BUFFER_BUSY; 1469 buffer->state = NETXEN_BUFFER_BUSY;
1468 buffer->dma = dma; 1470 buffer->dma = dma;
1471
1469 /* make a rcv descriptor */ 1472 /* make a rcv descriptor */
1473 pdesc = &rds_ring->desc_head[producer];
1474 pdesc->addr_buffer = cpu_to_le64(dma);
1470 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); 1475 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1471 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); 1476 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1472 DPRINTK(INFO, "done writing descripter\n"); 1477
1473 producer = 1478 producer = get_next_index(producer, rds_ring->max_rx_desc_count);
1474 get_next_index(producer, rds_ring->max_rx_desc_count);
1475 index = get_next_index(index, rds_ring->max_rx_desc_count);
1476 } 1479 }
1477 /* if we did allocate buffers, then write the count to Phantom */ 1480 /* if we did allocate buffers, then write the count to Phantom */
1478 if (count) { 1481 if (count) {
1479 rds_ring->begin_alloc = index;
1480 rds_ring->producer = producer; 1482 rds_ring->producer = producer;
1481 /* Window = 1 */ 1483 /* Window = 1 */
1482 adapter->pci_write_normalize(adapter, 1484 adapter->pci_write_normalize(adapter,
@@ -1515,49 +1517,50 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
1515 struct rcv_desc *pdesc; 1517 struct rcv_desc *pdesc;
1516 struct netxen_rx_buffer *buffer; 1518 struct netxen_rx_buffer *buffer;
1517 int count = 0; 1519 int count = 0;
1518 int index = 0;
1519 struct list_head *head; 1520 struct list_head *head;
1521 dma_addr_t dma;
1520 1522
1521 rds_ring = &recv_ctx->rds_rings[ringid]; 1523 rds_ring = &recv_ctx->rds_rings[ringid];
1522 1524
1523 producer = rds_ring->producer; 1525 producer = rds_ring->producer;
1524 index = rds_ring->begin_alloc;
1525 head = &rds_ring->free_list; 1526 head = &rds_ring->free_list;
1526 /* We can start writing rx descriptors into the phantom memory. */ 1527 /* We can start writing rx descriptors into the phantom memory. */
1527 while (!list_empty(head)) { 1528 while (!list_empty(head)) {
1528 1529
1529 skb = dev_alloc_skb(rds_ring->skb_size); 1530 skb = dev_alloc_skb(rds_ring->skb_size);
1530 if (unlikely(!skb)) { 1531 if (unlikely(!skb)) {
1531 rds_ring->begin_alloc = index;
1532 break; 1532 break;
1533 } 1533 }
1534 1534
1535 if (!adapter->ahw.cut_through)
1536 skb_reserve(skb, 2);
1537
1538 dma = pci_map_single(pdev, skb->data,
1539 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1540 if (pci_dma_mapping_error(pdev, dma)) {
1541 dev_kfree_skb_any(skb);
1542 break;
1543 }
1544
1545 count++;
1535 buffer = list_entry(head->next, struct netxen_rx_buffer, list); 1546 buffer = list_entry(head->next, struct netxen_rx_buffer, list);
1536 list_del(&buffer->list); 1547 list_del(&buffer->list);
1537 1548
1538 count++; /* now there should be no failure */
1539 pdesc = &rds_ring->desc_head[producer];
1540 if (!adapter->ahw.cut_through)
1541 skb_reserve(skb, 2);
1542 buffer->skb = skb; 1549 buffer->skb = skb;
1543 buffer->state = NETXEN_BUFFER_BUSY; 1550 buffer->state = NETXEN_BUFFER_BUSY;
1544 buffer->dma = pci_map_single(pdev, skb->data, 1551 buffer->dma = dma;
1545 rds_ring->dma_size,
1546 PCI_DMA_FROMDEVICE);
1547 1552
1548 /* make a rcv descriptor */ 1553 /* make a rcv descriptor */
1554 pdesc = &rds_ring->desc_head[producer];
1549 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); 1555 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1550 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); 1556 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1551 pdesc->addr_buffer = cpu_to_le64(buffer->dma); 1557 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1552 producer = 1558
1553 get_next_index(producer, rds_ring->max_rx_desc_count); 1559 producer = get_next_index(producer, rds_ring->max_rx_desc_count);
1554 index = get_next_index(index, rds_ring->max_rx_desc_count);
1555 buffer = &rds_ring->rx_buf_arr[index];
1556 } 1560 }
1557 1561
1558 /* if we did allocate buffers, then write the count to Phantom */ 1562 /* if we did allocate buffers, then write the count to Phantom */
1559 if (count) { 1563 if (count) {
1560 rds_ring->begin_alloc = index;
1561 rds_ring->producer = producer; 1564 rds_ring->producer = producer;
1562 /* Window = 1 */ 1565 /* Window = 1 */
1563 adapter->pci_write_normalize(adapter, 1566 adapter->pci_write_normalize(adapter,
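Both receive-buffer posting paths are reordered so that every step that can fail (skb allocation, DMA mapping) happens before a buffer is dequeued from the free list or a descriptor slot is consumed; on failure the ring state is left exactly as it was. The general shape of that ordering, as a sketch:

	/* Sketch: do all fallible work first, commit ring state last. */
	skb = dev_alloc_skb(rds_ring->skb_size);
	if (!skb)
		break;

	dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
			     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		break;		/* free list and producer index untouched */
	}

	/* only now take a free-list entry and publish the descriptor */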
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ba01524b5531..645d384fe87e 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -39,7 +39,9 @@
39#include "netxen_nic_phan_reg.h" 39#include "netxen_nic_phan_reg.h"
40 40
41#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
42#include <linux/if_vlan.h>
42#include <net/ip.h> 43#include <net/ip.h>
44#include <linux/ipv6.h>
43 45
44MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 46MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
45MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
@@ -242,7 +244,7 @@ static void netxen_check_options(struct netxen_adapter *adapter)
242 case NETXEN_BRDTYPE_P3_4_GB: 244 case NETXEN_BRDTYPE_P3_4_GB:
243 case NETXEN_BRDTYPE_P3_4_GB_MM: 245 case NETXEN_BRDTYPE_P3_4_GB_MM:
244 adapter->msix_supported = !!use_msi_x; 246 adapter->msix_supported = !!use_msi_x;
245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 247 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
246 break; 248 break;
247 249
248 case NETXEN_BRDTYPE_P2_SB35_4G: 250 case NETXEN_BRDTYPE_P2_SB35_4G:
@@ -251,6 +253,14 @@ static void netxen_check_options(struct netxen_adapter *adapter)
251 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G; 253 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
252 break; 254 break;
253 255
256 case NETXEN_BRDTYPE_P3_10G_TP:
257 adapter->msix_supported = !!use_msi_x;
258 if (adapter->ahw.board_type == NETXEN_NIC_XGBE)
259 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
260 else
261 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
262 break;
263
254 default: 264 default:
255 adapter->msix_supported = 0; 265 adapter->msix_supported = 0;
256 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G; 266 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_1G;
@@ -271,10 +281,15 @@ static void netxen_check_options(struct netxen_adapter *adapter)
271static int 281static int
272netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot) 282netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
273{ 283{
274 int ret = 0; 284 u32 val, timeout;
275 285
276 if (first_boot == 0x55555555) { 286 if (first_boot == 0x55555555) {
277 /* This is the first boot after power up */ 287 /* This is the first boot after power up */
288 adapter->pci_write_normalize(adapter,
289 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
290
291 if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
292 return 0;
278 293
279 /* PCI bus master workaround */ 294 /* PCI bus master workaround */
280 adapter->hw_read_wx(adapter, 295 adapter->hw_read_wx(adapter,
@@ -294,18 +309,26 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
294 /* clear the register for future unloads/loads */ 309 /* clear the register for future unloads/loads */
295 adapter->pci_write_normalize(adapter, 310 adapter->pci_write_normalize(adapter,
296 NETXEN_CAM_RAM(0x1fc), 0); 311 NETXEN_CAM_RAM(0x1fc), 0);
297 ret = -1; 312 return -EIO;
298 } 313 }
299 314
300 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 315 /* Start P2 boot loader */
301 /* Start P2 boot loader */ 316 val = adapter->pci_read_normalize(adapter,
302 adapter->pci_write_normalize(adapter, 317 NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
303 NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC); 318 adapter->pci_write_normalize(adapter,
304 adapter->pci_write_normalize(adapter, 319 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
305 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1); 320 timeout = 0;
306 } 321 do {
322 msleep(1);
323 val = adapter->pci_read_normalize(adapter,
324 NETXEN_CAM_RAM(0x1fc));
325
326 if (++timeout > 5000)
327 return -EIO;
328
329 } while (val == NETXEN_BDINFO_MAGIC);
307 } 330 }
308 return ret; 331 return 0;
309} 332}
310 333
311static void netxen_set_port_mode(struct netxen_adapter *adapter) 334static void netxen_set_port_mode(struct netxen_adapter *adapter)
@@ -712,17 +735,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
712 735
713 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 736 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
714 737
715 /* ScatterGather support */ 738 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
716 netdev->features = NETIF_F_SG; 739 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
717 netdev->features |= NETIF_F_IP_CSUM; 740
718 netdev->features |= NETIF_F_TSO;
719 if (NX_IS_REVISION_P3(revision_id)) { 741 if (NX_IS_REVISION_P3(revision_id)) {
720 netdev->features |= NETIF_F_IPV6_CSUM; 742 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
721 netdev->features |= NETIF_F_TSO6; 743 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
722 } 744 }
723 745
724 if (adapter->pci_using_dac) 746 if (adapter->pci_using_dac) {
725 netdev->features |= NETIF_F_HIGHDMA; 747 netdev->features |= NETIF_F_HIGHDMA;
748 netdev->vlan_features |= NETIF_F_HIGHDMA;
749 }
726 750
727 /* 751 /*
728 * Set the CRB window to invalid. If any register in window 0 is 752 * Set the CRB window to invalid. If any register in window 0 is
@@ -784,8 +808,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
784 CRB_CMDPEG_STATE, 0); 808 CRB_CMDPEG_STATE, 0);
785 netxen_pinit_from_rom(adapter, 0); 809 netxen_pinit_from_rom(adapter, 0);
786 msleep(1); 810 msleep(1);
787 netxen_load_firmware(adapter);
788 } 811 }
812 netxen_load_firmware(adapter);
789 813
790 if (NX_IS_REVISION_P3(revision_id)) 814 if (NX_IS_REVISION_P3(revision_id))
791 netxen_pcie_strap_init(adapter); 815 netxen_pcie_strap_init(adapter);
@@ -801,13 +825,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
801 825
802 } 826 }
803 827
804 if ((first_boot == 0x55555555) &&
805 (NX_IS_REVISION_P2(revision_id))) {
806 /* Unlock the HW, prompting the boot sequence */
807 adapter->pci_write_normalize(adapter,
808 NETXEN_ROMUSB_GLB_PEGTUNE_DONE, 1);
809 }
810
811 err = netxen_initialize_adapter_offload(adapter); 828 err = netxen_initialize_adapter_offload(adapter);
812 if (err) 829 if (err)
813 goto err_out_iounmap; 830 goto err_out_iounmap;
@@ -821,7 +838,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
821 adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i); 838 adapter->pci_write_normalize(adapter, CRB_DRIVER_VERSION, i);
822 839
823 /* Handshake with the card before we register the devices. */ 840 /* Handshake with the card before we register the devices. */
824 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); 841 err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
842 if (err)
843 goto err_out_free_offload;
825 844
826 } /* first_driver */ 845 } /* first_driver */
827 846
@@ -925,6 +944,7 @@ err_out_disable_msi:
925 if (adapter->flags & NETXEN_NIC_MSI_ENABLED) 944 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
926 pci_disable_msi(pdev); 945 pci_disable_msi(pdev);
927 946
947err_out_free_offload:
928 if (first_driver) 948 if (first_driver)
929 netxen_free_adapter_offload(adapter); 949 netxen_free_adapter_offload(adapter);
930 950
@@ -968,6 +988,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
968 netxen_free_hw_resources(adapter); 988 netxen_free_hw_resources(adapter);
969 netxen_release_rx_buffers(adapter); 989 netxen_release_rx_buffers(adapter);
970 netxen_free_sw_resources(adapter); 990 netxen_free_sw_resources(adapter);
991
992 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
993 netxen_p3_free_mac_list(adapter);
971 } 994 }
972 995
973 if (adapter->portnum == 0) 996 if (adapter->portnum == 0)
@@ -983,8 +1006,10 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
983 1006
984 iounmap(adapter->ahw.db_base); 1007 iounmap(adapter->ahw.db_base);
985 iounmap(adapter->ahw.pci_base0); 1008 iounmap(adapter->ahw.pci_base0);
986 iounmap(adapter->ahw.pci_base1); 1009 if (adapter->ahw.pci_base1 != NULL)
987 iounmap(adapter->ahw.pci_base2); 1010 iounmap(adapter->ahw.pci_base1);
1011 if (adapter->ahw.pci_base2 != NULL)
1012 iounmap(adapter->ahw.pci_base2);
988 1013
989 pci_release_regions(pdev); 1014 pci_release_regions(pdev);
990 pci_disable_device(pdev); 1015 pci_disable_device(pdev);
@@ -1137,29 +1162,72 @@ static int netxen_nic_close(struct net_device *netdev)
1137 return 0; 1162 return 0;
1138} 1163}
1139 1164
1140void netxen_tso_check(struct netxen_adapter *adapter, 1165static bool netxen_tso_check(struct net_device *netdev,
1141 struct cmd_desc_type0 *desc, struct sk_buff *skb) 1166 struct cmd_desc_type0 *desc, struct sk_buff *skb)
1142{ 1167{
1143 if (desc->mss) { 1168 bool tso = false;
1144 desc->total_hdr_length = (sizeof(struct ethhdr) + 1169 u8 opcode = TX_ETHER_PKT;
1145 ip_hdrlen(skb) + tcp_hdrlen(skb)); 1170 __be16 protocol = skb->protocol;
1171 u16 flags = 0;
1172
1173 if (protocol == __constant_htons(ETH_P_8021Q)) {
1174 struct vlan_ethhdr *vh = (struct vlan_ethhdr *)skb->data;
1175 protocol = vh->h_vlan_encapsulated_proto;
1176 flags = FLAGS_VLAN_TAGGED;
1177 }
1146 1178
1147 if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) && 1179 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1148 (skb->protocol == htons(ETH_P_IPV6))) 1180 skb_shinfo(skb)->gso_size > 0) {
1149 netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6); 1181
1150 else 1182 desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1151 netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO); 1183 desc->total_hdr_length =
1184 skb_transport_offset(skb) + tcp_hdrlen(skb);
1185
1186 opcode = (protocol == __constant_htons(ETH_P_IPV6)) ?
1187 TX_TCP_LSO6 : TX_TCP_LSO;
1188 tso = true;
1152 1189
1153 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 1190 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1154 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1191 u8 l4proto;
1155 netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT); 1192
1156 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1193 if (protocol == __constant_htons(ETH_P_IP)) {
1157 netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT); 1194 l4proto = ip_hdr(skb)->protocol;
1158 else 1195
1159 return; 1196 if (l4proto == IPPROTO_TCP)
1197 opcode = TX_TCP_PKT;
1198 else if(l4proto == IPPROTO_UDP)
1199 opcode = TX_UDP_PKT;
1200 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1201 l4proto = ipv6_hdr(skb)->nexthdr;
1202
1203 if (l4proto == IPPROTO_TCP)
1204 opcode = TX_TCPV6_PKT;
1205 else if(l4proto == IPPROTO_UDP)
1206 opcode = TX_UDPV6_PKT;
1207 }
1160 } 1208 }
1161 desc->tcp_hdr_offset = skb_transport_offset(skb); 1209 desc->tcp_hdr_offset = skb_transport_offset(skb);
1162 desc->ip_hdr_offset = skb_network_offset(skb); 1210 desc->ip_hdr_offset = skb_network_offset(skb);
1211 netxen_set_tx_flags_opcode(desc, flags, opcode);
1212 return tso;
1213}
1214
1215static void
1216netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
1217 struct netxen_cmd_buffer *pbuf, int last)
1218{
1219 int k;
1220 struct netxen_skb_frag *buffrag;
1221
1222 buffrag = &pbuf->frag_array[0];
1223 pci_unmap_single(pdev, buffrag->dma,
1224 buffrag->length, PCI_DMA_TODEVICE);
1225
1226 for (k = 1; k < last; k++) {
1227 buffrag = &pbuf->frag_array[k];
1228 pci_unmap_page(pdev, buffrag->dma,
1229 buffrag->length, PCI_DMA_TODEVICE);
1230 }
1163} 1231}
1164 1232
1165static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1233static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1167,33 +1235,22 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1167 struct netxen_adapter *adapter = netdev_priv(netdev); 1235 struct netxen_adapter *adapter = netdev_priv(netdev);
1168 struct netxen_hardware_context *hw = &adapter->ahw; 1236 struct netxen_hardware_context *hw = &adapter->ahw;
1169 unsigned int first_seg_len = skb->len - skb->data_len; 1237 unsigned int first_seg_len = skb->len - skb->data_len;
1238 struct netxen_cmd_buffer *pbuf;
1170 struct netxen_skb_frag *buffrag; 1239 struct netxen_skb_frag *buffrag;
1171 unsigned int i; 1240 struct cmd_desc_type0 *hwdesc;
1241 struct pci_dev *pdev = adapter->pdev;
1242 dma_addr_t temp_dma;
1243 int i, k;
1172 1244
1173 u32 producer, consumer; 1245 u32 producer, consumer;
1174 u32 saved_producer = 0; 1246 int frag_count, no_of_desc;
1175 struct cmd_desc_type0 *hwdesc;
1176 int k;
1177 struct netxen_cmd_buffer *pbuf = NULL;
1178 int frag_count;
1179 int no_of_desc;
1180 u32 num_txd = adapter->max_tx_desc_count; 1247 u32 num_txd = adapter->max_tx_desc_count;
1248 bool is_tso = false;
1181 1249
1182 frag_count = skb_shinfo(skb)->nr_frags + 1; 1250 frag_count = skb_shinfo(skb)->nr_frags + 1;
1183 1251
1184 /* There 4 fragments per descriptor */ 1252 /* There 4 fragments per descriptor */
1185 no_of_desc = (frag_count + 3) >> 2; 1253 no_of_desc = (frag_count + 3) >> 2;
1186 if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1187 if (skb_shinfo(skb)->gso_size > 0) {
1188
1189 no_of_desc++;
1190 if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
1191 sizeof(struct ethhdr)) >
1192 (sizeof(struct cmd_desc_type0) - 2)) {
1193 no_of_desc++;
1194 }
1195 }
1196 }
1197 1254
1198 producer = adapter->cmd_producer; 1255 producer = adapter->cmd_producer;
1199 smp_mb(); 1256 smp_mb();
@@ -1205,34 +1262,26 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1205 } 1262 }
1206 1263
1207 /* Copy the descriptors into the hardware */ 1264 /* Copy the descriptors into the hardware */
1208 saved_producer = producer;
1209 hwdesc = &hw->cmd_desc_head[producer]; 1265 hwdesc = &hw->cmd_desc_head[producer];
1210 memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); 1266 memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
1211 /* Take skb->data itself */ 1267 /* Take skb->data itself */
1212 pbuf = &adapter->cmd_buf_arr[producer]; 1268 pbuf = &adapter->cmd_buf_arr[producer];
1213 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) && 1269
1214 skb_shinfo(skb)->gso_size > 0) { 1270 is_tso = netxen_tso_check(netdev, hwdesc, skb);
1215 pbuf->mss = skb_shinfo(skb)->gso_size; 1271
1216 hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1217 } else {
1218 pbuf->mss = 0;
1219 hwdesc->mss = 0;
1220 }
1221 pbuf->total_length = skb->len;
1222 pbuf->skb = skb; 1272 pbuf->skb = skb;
1223 pbuf->cmd = TX_ETHER_PKT;
1224 pbuf->frag_count = frag_count; 1273 pbuf->frag_count = frag_count;
1225 pbuf->port = adapter->portnum;
1226 buffrag = &pbuf->frag_array[0]; 1274 buffrag = &pbuf->frag_array[0];
1227 buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len, 1275 temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
1228 PCI_DMA_TODEVICE); 1276 PCI_DMA_TODEVICE);
1277 if (pci_dma_mapping_error(pdev, temp_dma))
1278 goto drop_packet;
1279
1280 buffrag->dma = temp_dma;
1229 buffrag->length = first_seg_len; 1281 buffrag->length = first_seg_len;
1230 netxen_set_cmd_desc_totallength(hwdesc, skb->len); 1282 netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
1231 netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count); 1283 netxen_set_tx_port(hwdesc, adapter->portnum);
1232 netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
1233 1284
1234 netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
1235 netxen_set_cmd_desc_ctxid(hwdesc, adapter->portnum);
1236 hwdesc->buffer1_length = cpu_to_le16(first_seg_len); 1285 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
1237 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); 1286 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1238 1287
@@ -1240,7 +1289,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1240 struct skb_frag_struct *frag; 1289 struct skb_frag_struct *frag;
1241 int len, temp_len; 1290 int len, temp_len;
1242 unsigned long offset; 1291 unsigned long offset;
1243 dma_addr_t temp_dma;
1244 1292
1245 /* move to next desc. if there is a need */ 1293 /* move to next desc. if there is a need */
1246 if ((i & 0x3) == 0) { 1294 if ((i & 0x3) == 0) {
@@ -1256,8 +1304,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1256 offset = frag->page_offset; 1304 offset = frag->page_offset;
1257 1305
1258 temp_len = len; 1306 temp_len = len;
1259 temp_dma = pci_map_page(adapter->pdev, frag->page, offset, 1307 temp_dma = pci_map_page(pdev, frag->page, offset,
1260 len, PCI_DMA_TODEVICE); 1308 len, PCI_DMA_TODEVICE);
1309 if (pci_dma_mapping_error(pdev, temp_dma)) {
1310 netxen_clean_tx_dma_mapping(pdev, pbuf, i);
1311 goto drop_packet;
1312 }
1261 1313
1262 buffrag++; 1314 buffrag++;
1263 buffrag->dma = temp_dma; 1315 buffrag->dma = temp_dma;
@@ -1285,16 +1337,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1285 } 1337 }
1286 producer = get_next_index(producer, num_txd); 1338 producer = get_next_index(producer, num_txd);
1287 1339
1288 /* might change opcode to TX_TCP_LSO */
1289 netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
1290
1291 /* For LSO, we need to copy the MAC/IP/TCP headers into 1340 /* For LSO, we need to copy the MAC/IP/TCP headers into
1292 * the descriptor ring 1341 * the descriptor ring
1293 */ 1342 */
1294 if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer]) 1343 if (is_tso) {
1295 == TX_TCP_LSO) {
1296 int hdr_len, first_hdr_len, more_hdr; 1344 int hdr_len, first_hdr_len, more_hdr;
1297 hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length; 1345 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1298 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) { 1346 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
1299 first_hdr_len = sizeof(struct cmd_desc_type0) - 2; 1347 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
1300 more_hdr = 1; 1348 more_hdr = 1;
@@ -1336,6 +1384,11 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1336 netdev->trans_start = jiffies; 1384 netdev->trans_start = jiffies;
1337 1385
1338 return NETDEV_TX_OK; 1386 return NETDEV_TX_OK;
1387
1388drop_packet:
1389 adapter->stats.txdropped++;
1390 dev_kfree_skb_any(skb);
1391 return NETDEV_TX_OK;
1339} 1392}
1340 1393
1341static int netxen_nic_check_temp(struct netxen_adapter *adapter) 1394static int netxen_nic_check_temp(struct netxen_adapter *adapter)
@@ -1407,6 +1460,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1407 netif_carrier_off(netdev); 1460 netif_carrier_off(netdev);
1408 netif_stop_queue(netdev); 1461 netif_stop_queue(netdev);
1409 } 1462 }
1463
1464 netxen_nic_set_link_parameters(adapter);
1410 } else if (!adapter->ahw.linkup && linkup) { 1465 } else if (!adapter->ahw.linkup && linkup) {
1411 printk(KERN_INFO "%s: %s NIC Link is up\n", 1466 printk(KERN_INFO "%s: %s NIC Link is up\n",
1412 netxen_nic_driver_name, netdev->name); 1467 netxen_nic_driver_name, netdev->name);
@@ -1415,6 +1470,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1415 netif_carrier_on(netdev); 1470 netif_carrier_on(netdev);
1416 netif_wake_queue(netdev); 1471 netif_wake_queue(netdev);
1417 } 1472 }
1473
1474 netxen_nic_set_link_parameters(adapter);
1418 } 1475 }
1419} 1476}
1420 1477
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 11adf6ed4628..811a637695ca 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -296,9 +296,8 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
296 struct phy_driver *phydrv = to_phy_driver(drv); 296 struct phy_driver *phydrv = to_phy_driver(drv);
297 struct phy_device *phydev = to_phy_device(dev); 297 struct phy_device *phydev = to_phy_device(dev);
298 298
299 if ((!device_may_wakeup(phydev->dev.parent)) && 299 if (drv && phydrv->suspend && !device_may_wakeup(phydev->dev.parent))
300 (phydrv && phydrv->suspend)) 300 ret = phydrv->suspend(phydev);
301 ret = phydrv->suspend(phydev);
302 301
303 return ret; 302 return ret;
304} 303}
@@ -310,8 +309,7 @@ static int mdio_bus_resume(struct device * dev)
310 struct phy_driver *phydrv = to_phy_driver(drv); 309 struct phy_driver *phydrv = to_phy_driver(drv);
311 struct phy_device *phydev = to_phy_device(dev); 310 struct phy_device *phydev = to_phy_device(dev);
312 311
313 if ((!device_may_wakeup(phydev->dev.parent)) && 312 if (drv && phydrv->resume && !device_may_wakeup(phydev->dev.parent))
314 (phydrv && phydrv->resume))
315 ret = phydrv->resume(phydev); 313 ret = phydrv->resume(phydev);
316 314
317 return ret; 315 return ret;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e35460165bf7..0a06e4fd37d9 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -231,15 +231,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
231 if ((phy_id & 0x1fffffff) == 0x1fffffff) 231 if ((phy_id & 0x1fffffff) == 0x1fffffff)
232 return NULL; 232 return NULL;
233 233
234 /*
235 * Broken hardware is sometimes missing the pull-up resistor on the
236 * MDIO line, which results in reads to non-existent devices returning
237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
238 * device as well.
239 */
240 if (phy_id == 0)
241 return NULL;
242
243 dev = phy_device_create(bus, addr, phy_id); 234 dev = phy_device_create(bus, addr, phy_id);
244 235
245 return dev; 236 return dev;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c05d38d46350..1387187543e4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -81,6 +81,9 @@ static struct phy_driver lan83c185_driver = {
81 .ack_interrupt = smsc_phy_ack_interrupt, 81 .ack_interrupt = smsc_phy_ack_interrupt,
82 .config_intr = smsc_phy_config_intr, 82 .config_intr = smsc_phy_config_intr,
83 83
84 .suspend = genphy_suspend,
85 .resume = genphy_resume,
86
84 .driver = { .owner = THIS_MODULE, } 87 .driver = { .owner = THIS_MODULE, }
85}; 88};
86 89
@@ -102,6 +105,9 @@ static struct phy_driver lan8187_driver = {
102 .ack_interrupt = smsc_phy_ack_interrupt, 105 .ack_interrupt = smsc_phy_ack_interrupt,
103 .config_intr = smsc_phy_config_intr, 106 .config_intr = smsc_phy_config_intr,
104 107
108 .suspend = genphy_suspend,
109 .resume = genphy_resume,
110
105 .driver = { .owner = THIS_MODULE, } 111 .driver = { .owner = THIS_MODULE, }
106}; 112};
107 113
@@ -123,6 +129,9 @@ static struct phy_driver lan8700_driver = {
123 .ack_interrupt = smsc_phy_ack_interrupt, 129 .ack_interrupt = smsc_phy_ack_interrupt,
124 .config_intr = smsc_phy_config_intr, 130 .config_intr = smsc_phy_config_intr,
125 131
132 .suspend = genphy_suspend,
133 .resume = genphy_resume,
134
126 .driver = { .owner = THIS_MODULE, } 135 .driver = { .owner = THIS_MODULE, }
127}; 136};
128 137
@@ -144,6 +153,9 @@ static struct phy_driver lan911x_int_driver = {
144 .ack_interrupt = smsc_phy_ack_interrupt, 153 .ack_interrupt = smsc_phy_ack_interrupt,
145 .config_intr = smsc_phy_config_intr, 154 .config_intr = smsc_phy_config_intr,
146 155
156 .suspend = genphy_suspend,
157 .resume = genphy_resume,
158
147 .driver = { .owner = THIS_MODULE, } 159 .driver = { .owner = THIS_MODULE, }
148}; 160};
149 161
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 06b448285eb5..7b2728b8f1b7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -250,6 +250,7 @@ static int ppp_connect_channel(struct channel *pch, int unit);
250static int ppp_disconnect_channel(struct channel *pch); 250static int ppp_disconnect_channel(struct channel *pch);
251static void ppp_destroy_channel(struct channel *pch); 251static void ppp_destroy_channel(struct channel *pch);
252static int unit_get(struct idr *p, void *ptr); 252static int unit_get(struct idr *p, void *ptr);
253static int unit_set(struct idr *p, void *ptr, int n);
253static void unit_put(struct idr *p, int n); 254static void unit_put(struct idr *p, int n);
254static void *unit_find(struct idr *p, int n); 255static void *unit_find(struct idr *p, int n);
255 256
@@ -2432,11 +2433,18 @@ ppp_create_interface(int unit, int *retp)
2432 } else { 2433 } else {
2433 if (unit_find(&ppp_units_idr, unit)) 2434 if (unit_find(&ppp_units_idr, unit))
2434 goto out2; /* unit already exists */ 2435 goto out2; /* unit already exists */
2435 else { 2436 /*
 2436 /* darn, someone is cheating us? */ 2437 * if the caller asked for a specific unit number,
 2437 *retp = -EINVAL; 2438 * try to satisfy the request; otherwise the caller
 2439 * should simply ask us for a new unit number
 2440 *
 2441 * NOTE: returning EEXIST here is not strictly accurate,
 2442 * but pppd will then ask us to allocate a new unit,
 2443 * so the user still ends up happy :)
2444 */
2445 unit = unit_set(&ppp_units_idr, ppp, unit);
2446 if (unit < 0)
2438 goto out2; 2447 goto out2;
2439 }
2440 } 2448 }
2441 2449
2442 /* Initialize the new ppp unit */ 2450 /* Initialize the new ppp unit */
@@ -2677,14 +2685,37 @@ static void __exit ppp_cleanup(void)
2677 * by holding all_ppp_mutex 2685 * by holding all_ppp_mutex
2678 */ 2686 */
2679 2687
2688/* associate pointer with specified number */
2689static int unit_set(struct idr *p, void *ptr, int n)
2690{
2691 int unit, err;
2692
2693again:
2694 if (!idr_pre_get(p, GFP_KERNEL)) {
2695 printk(KERN_ERR "PPP: No free memory for idr\n");
2696 return -ENOMEM;
2697 }
2698
2699 err = idr_get_new_above(p, ptr, n, &unit);
2700 if (err == -EAGAIN)
2701 goto again;
2702
2703 if (unit != n) {
2704 idr_remove(p, unit);
2705 return -EINVAL;
2706 }
2707
2708 return unit;
2709}
2710
2680/* get new free unit number and associate pointer with it */ 2711/* get new free unit number and associate pointer with it */
2681static int unit_get(struct idr *p, void *ptr) 2712static int unit_get(struct idr *p, void *ptr)
2682{ 2713{
2683 int unit, err; 2714 int unit, err;
2684 2715
2685again: 2716again:
2686 if (idr_pre_get(p, GFP_KERNEL) == 0) { 2717 if (!idr_pre_get(p, GFP_KERNEL)) {
2687 printk(KERN_ERR "Out of memory expanding drawable idr\n"); 2718 printk(KERN_ERR "PPP: No free memory for idr\n");
2688 return -ENOMEM; 2719 return -ENOMEM;
2689 } 2720 }
2690 2721
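unit_set() asks the idr for an id at or above the requested number; if idr_get_new_above() hands back anything other than the exact unit that was asked for, the slot is released again and -EINVAL is returned, so ppp_create_interface() fails rather than silently renumbering the unit. A hypothetical caller, to show how the two helpers divide the work:

	/* Sketch: try to claim a specific unit, else take any free one. */
	unit = unit_set(&ppp_units_idr, ppp, requested_unit);
	if (unit < 0)
		unit = unit_get(&ppp_units_idr, ppp);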
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 6cbefcae9ac2..be4465bc0a69 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -509,10 +509,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
509 else 509 else
510 ret = sis900_get_mac_addr(pci_dev, net_dev); 510 ret = sis900_get_mac_addr(pci_dev, net_dev);
511 511
512 if (ret == 0) { 512 if (!ret || !is_valid_ether_addr(net_dev->dev_addr)) {
513 printk(KERN_WARNING "%s: Cannot read MAC address.\n", dev_name); 513 random_ether_addr(net_dev->dev_addr);
 514 ret = -ENODEV; 514 printk(KERN_WARNING "%s: Unreadable or invalid MAC address, "
 515 goto err_unmap_rx; 515 "using a randomly generated one\n", dev_name);
516 } 516 }
517 517
518 /* 630ET : set the mii access mode as software-mode */ 518 /* 630ET : set the mii access mode as software-mode */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 5e2dbaee125b..8b3f84685387 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -7535,11 +7535,58 @@ static int tg3_test_msi(struct tg3 *tp)
7535 return err; 7535 return err;
7536} 7536}
7537 7537
7538static int tg3_request_firmware(struct tg3 *tp)
7539{
7540 const __be32 *fw_data;
7541
7542 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7543 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7544 tp->dev->name, tp->fw_needed);
7545 return -ENOENT;
7546 }
7547
7548 fw_data = (void *)tp->fw->data;
7549
7550 /* Firmware blob starts with version numbers, followed by
7551 * start address and _full_ length including BSS sections
 7552 * (which must be longer than the actual data, of course)
7553 */
7554
7555 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
7556 if (tp->fw_len < (tp->fw->size - 12)) {
7557 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7558 tp->dev->name, tp->fw_len, tp->fw_needed);
7559 release_firmware(tp->fw);
7560 tp->fw = NULL;
7561 return -EINVAL;
7562 }
7563
7564 /* We no longer need firmware; we have it. */
7565 tp->fw_needed = NULL;
7566 return 0;
7567}
7568
7538static int tg3_open(struct net_device *dev) 7569static int tg3_open(struct net_device *dev)
7539{ 7570{
7540 struct tg3 *tp = netdev_priv(dev); 7571 struct tg3 *tp = netdev_priv(dev);
7541 int err; 7572 int err;
7542 7573
7574 if (tp->fw_needed) {
7575 err = tg3_request_firmware(tp);
7576 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7577 if (err)
7578 return err;
7579 } else if (err) {
7580 printk(KERN_WARNING "%s: TSO capability disabled.\n",
7581 tp->dev->name);
7582 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7583 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7584 printk(KERN_NOTICE "%s: TSO capability restored.\n",
7585 tp->dev->name);
7586 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7587 }
7588 }
7589
7543 netif_carrier_off(tp->dev); 7590 netif_carrier_off(tp->dev);
7544 7591
7545 err = tg3_set_power_state(tp, PCI_D0); 7592 err = tg3_set_power_state(tp, PCI_D0);
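tg3_request_firmware() treats the first 12 bytes of the blob as a header and takes the third big-endian word as the full image length (including BSS), rejecting blobs whose declared length is smaller than the payload actually present. A sketch of the assumed layout (field names are guesses, not taken from the driver):

	/* Assumed 12-byte header at the start of the firmware blob (sketch). */
	struct tg3_fw_hdr {
		__be32 version;
		__be32 base_addr;	/* load address on the chip */
		__be32 len;		/* full length, including BSS */
	};

	const struct tg3_fw_hdr *hdr = (const void *)tp->fw->data;

	if (be32_to_cpu(hdr->len) < tp->fw->size - sizeof(*hdr))
		return -EINVAL;	/* declared length shorter than the payload */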
@@ -12934,7 +12981,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
12934 struct net_device *dev; 12981 struct net_device *dev;
12935 struct tg3 *tp; 12982 struct tg3 *tp;
12936 int err, pm_cap; 12983 int err, pm_cap;
12937 const char *fw_name = NULL;
12938 char str[40]; 12984 char str[40];
12939 u64 dma_mask, persist_dma_mask; 12985 u64 dma_mask, persist_dma_mask;
12940 12986
@@ -13091,7 +13137,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13091 tg3_init_bufmgr_config(tp); 13137 tg3_init_bufmgr_config(tp);
13092 13138
13093 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 13139 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13094 fw_name = FIRMWARE_TG3; 13140 tp->fw_needed = FIRMWARE_TG3;
13095 13141
13096 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 13142 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13097 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 13143 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -13104,37 +13150,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13104 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 13150 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13105 } else { 13151 } else {
13106 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG; 13152 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13107 }
13108 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
13109 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) 13153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13110 fw_name = FIRMWARE_TG3TSO5; 13154 tp->fw_needed = FIRMWARE_TG3TSO5;
13111 else 13155 else
13112 fw_name = FIRMWARE_TG3TSO; 13156 tp->fw_needed = FIRMWARE_TG3TSO;
13113 }
13114
13115 if (fw_name) {
13116 const __be32 *fw_data;
13117
13118 err = request_firmware(&tp->fw, fw_name, &tp->pdev->dev);
13119 if (err) {
13120 printk(KERN_ERR "tg3: Failed to load firmware \"%s\"\n",
13121 fw_name);
13122 goto err_out_iounmap;
13123 }
13124
13125 fw_data = (void *)tp->fw->data;
13126
13127 /* Firmware blob starts with version numbers, followed by
13128 start address and _full_ length including BSS sections
13129 (which must be longer than the actual data, of course */
13130
13131 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
13132 if (tp->fw_len < (tp->fw->size - 12)) {
13133 printk(KERN_ERR "tg3: bogus length %d in \"%s\"\n",
13134 tp->fw_len, fw_name);
13135 err = -EINVAL;
13136 goto err_out_fw;
13137 }
13138 } 13157 }
13139 13158
13140 /* TSO is on by default on chips that support hardware TSO. 13159 /* TSO is on by default on chips that support hardware TSO.
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ae5da603c6af..508def3e077f 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2764,6 +2764,7 @@ struct tg3 {
2764 struct ethtool_coalesce coal; 2764 struct ethtool_coalesce coal;
2765 2765
2766 /* firmware info */ 2766 /* firmware info */
2767 const char *fw_needed;
2767 const struct firmware *fw; 2768 const struct firmware *fw;
2768 u32 fw_len; /* includes BSS */ 2769 u32 fw_len; /* includes BSS */
2769}; 2770};
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index c4918b86ed19..0d0fa91c0251 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1297,6 +1297,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1297 /* setup */ 1297 /* setup */
1298 spin_lock_irq(&serial->serial_lock); 1298 spin_lock_irq(&serial->serial_lock);
1299 tty->driver_data = serial; 1299 tty->driver_data = serial;
1300 tty_kref_put(serial->tty);
1300 serial->tty = tty_kref_get(tty); 1301 serial->tty = tty_kref_get(tty);
1301 spin_unlock_irq(&serial->serial_lock); 1302 spin_unlock_irq(&serial->serial_lock);
1302 1303
@@ -1792,8 +1793,8 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
1792 1793
1793 /* initialize */ 1794 /* initialize */
1794 ctrl_req->wValue = 0; 1795 ctrl_req->wValue = 0;
1795 ctrl_req->wIndex = hso_port_to_mux(port); 1796 ctrl_req->wIndex = cpu_to_le16(hso_port_to_mux(port));
1796 ctrl_req->wLength = size; 1797 ctrl_req->wLength = cpu_to_le16(size);
1797 1798
1798 if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) { 1799 if (type == USB_CDC_GET_ENCAPSULATED_RESPONSE) {
1799 /* Reading command */ 1800 /* Reading command */
@@ -2043,9 +2044,8 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2043 return -2; 2044 return -2;
2044 } 2045 }
2045 2046
2046 spin_lock(&serial->serial_lock); 2047 /* All callers to put_rxbuf_data hold serial_lock */
2047 tty = tty_kref_get(serial->tty); 2048 tty = tty_kref_get(serial->tty);
2048 spin_unlock(&serial->serial_lock);
2049 2049
2050 /* Push data to tty */ 2050 /* Push data to tty */
2051 if (tty) { 2051 if (tty) {
@@ -2053,8 +2053,10 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2053 serial->curr_rx_urb_offset; 2053 serial->curr_rx_urb_offset;
2054 D1("data to push to tty"); 2054 D1("data to push to tty");
2055 while (write_length_remaining) { 2055 while (write_length_remaining) {
2056 if (test_bit(TTY_THROTTLED, &tty->flags)) 2056 if (test_bit(TTY_THROTTLED, &tty->flags)) {
2057 tty_kref_put(tty);
2057 return -1; 2058 return -1;
2059 }
2058 curr_write_len = tty_insert_flip_string 2060 curr_write_len = tty_insert_flip_string
2059 (tty, urb->transfer_buffer + 2061 (tty, urb->transfer_buffer +
2060 serial->curr_rx_urb_offset, 2062 serial->curr_rx_urb_offset,
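The hso changes restore the tty_kref balance: hso_serial_open() now drops the reference it was still holding on a previously attached tty before storing the new one, and put_rxbuf_data() no longer leaks its reference on the TTY_THROTTLED early return (the local locking goes away too, since every caller already holds serial_lock). The discipline, as a sketch:

	/* Sketch: pair every tty_kref_get() with a tty_kref_put()
	 * on every exit path, including early returns. */
	tty = tty_kref_get(serial->tty);
	if (!tty)
		return 0;

	if (test_bit(TTY_THROTTLED, &tty->flags)) {
		tty_kref_put(tty);
		return -1;
	}

	/* ... push data to the tty ... */
	tty_kref_put(tty);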
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 5385d66b306e..ced8f36ebd01 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -94,10 +94,18 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
94{ 94{
95 struct usb_device *xdev = dev->udev; 95 struct usb_device *xdev = dev->udev;
96 int ret; 96 int ret;
97 void *buffer;
98
99 buffer = kmalloc(size, GFP_NOIO);
100 if (buffer == NULL)
101 return -ENOMEM;
97 102
98 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ, 103 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
99 MCS7830_RD_BMREQ, 0x0000, index, data, 104 MCS7830_RD_BMREQ, 0x0000, index, buffer,
100 size, MCS7830_CTRL_TIMEOUT); 105 size, MCS7830_CTRL_TIMEOUT);
106 memcpy(data, buffer, size);
107 kfree(buffer);
108
101 return ret; 109 return ret;
102} 110}
103 111
@@ -105,10 +113,18 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
105{ 113{
106 struct usb_device *xdev = dev->udev; 114 struct usb_device *xdev = dev->udev;
107 int ret; 115 int ret;
116 void *buffer;
117
118 buffer = kmalloc(size, GFP_NOIO);
119 if (buffer == NULL)
120 return -ENOMEM;
121
122 memcpy(buffer, data, size);
108 123
109 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, 124 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
110 MCS7830_WR_BMREQ, 0x0000, index, data, 125 MCS7830_WR_BMREQ, 0x0000, index, buffer,
111 size, MCS7830_CTRL_TIMEOUT); 126 size, MCS7830_CTRL_TIMEOUT);
127 kfree(buffer);
112 return ret; 128 return ret;
113} 129}
114 130
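Both mcs7830 register accessors now bounce through a kmalloc'd buffer: usb_control_msg() may DMA directly to or from the buffer it is given, so it has to be heap memory rather than the caller's stack or an arbitrary pointer. A generic sketch of the read side (the request-type flags and timeout here are illustrative, not the mcs7830 values):

	/* Sketch: DMA-safe control read -- bounce through heap memory. */
	static int ctrl_read_dmasafe(struct usb_device *udev, u8 breq, u16 index,
				     void *data, u16 size)
	{
		void *buf;
		int ret;

		buf = kmalloc(size, GFP_NOIO);
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), breq,
				      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      0x0000, index, buf, size, 5000);
		if (ret >= 0)
			memcpy(data, buf, size);

		kfree(buf);
		return ret;
	}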
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index a75f91dc3153..c5691fdb7079 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1302,7 +1302,7 @@ static void velocity_free_rd_ring(struct velocity_info *vptr)
1302static int velocity_init_td_ring(struct velocity_info *vptr) 1302static int velocity_init_td_ring(struct velocity_info *vptr)
1303{ 1303{
1304 dma_addr_t curr; 1304 dma_addr_t curr;
1305 unsigned int j; 1305 int j;
1306 1306
1307 /* Init the TD ring entries */ 1307 /* Init the TD ring entries */
1308 for (j = 0; j < vptr->tx.numq; j++) { 1308 for (j = 0; j < vptr->tx.numq; j++) {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 43f6523c40be..63ef2a8905fb 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,7 @@
24#include <linux/virtio.h> 24#include <linux/virtio.h>
25#include <linux/virtio_net.h> 25#include <linux/virtio_net.h>
26#include <linux/scatterlist.h> 26#include <linux/scatterlist.h>
27#include <linux/if_vlan.h>
27 28
28static int napi_weight = 128; 29static int napi_weight = 128;
29module_param(napi_weight, int, 0444); 30module_param(napi_weight, int, 0444);
@@ -33,7 +34,7 @@ module_param(csum, bool, 0444);
33module_param(gso, bool, 0444); 34module_param(gso, bool, 0444);
34 35
35/* FIXME: MTU in config. */ 36/* FIXME: MTU in config. */
36#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN) 37#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
37#define GOOD_COPY_LEN 128 38#define GOOD_COPY_LEN 128
38 39
39struct virtnet_info 40struct virtnet_info
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 2dc241689d37..0dbd85b0162d 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -622,7 +622,7 @@ static void hss_hdlc_rx_irq(void *pdev)
622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name); 622 printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
623#endif 623#endif
624 qmgr_disable_irq(queue_ids[port->id].rx); 624 qmgr_disable_irq(queue_ids[port->id].rx);
625 netif_rx_schedule(dev, &port->napi); 625 netif_rx_schedule(&port->napi);
626} 626}
627 627
628static int hss_hdlc_poll(struct napi_struct *napi, int budget) 628static int hss_hdlc_poll(struct napi_struct *napi, int budget)
@@ -651,7 +651,7 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
651 printk(KERN_DEBUG "%s: hss_hdlc_poll" 651 printk(KERN_DEBUG "%s: hss_hdlc_poll"
652 " netif_rx_complete\n", dev->name); 652 " netif_rx_complete\n", dev->name);
653#endif 653#endif
654 netif_rx_complete(dev, napi); 654 netif_rx_complete(napi);
655 qmgr_enable_irq(rxq); 655 qmgr_enable_irq(rxq);
656 if (!qmgr_stat_empty(rxq) && 656 if (!qmgr_stat_empty(rxq) &&
657 netif_rx_reschedule(napi)) { 657 netif_rx_reschedule(napi)) {
@@ -1069,7 +1069,7 @@ static int hss_hdlc_open(struct net_device *dev)
1069 hss_start_hdlc(port); 1069 hss_start_hdlc(port);
1070 1070
1071 /* we may already have RX data, enables IRQ */ 1071 /* we may already have RX data, enables IRQ */
1072 netif_rx_schedule(dev, &port->napi); 1072 netif_rx_schedule(&port->napi);
1073 return 0; 1073 return 0;
1074 1074
1075err_unlock: 1075err_unlock:
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index d3d37fed6893..15d9f51b292c 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -609,7 +609,7 @@ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
609 spin_lock_irqsave(&i2400m->rx_lock, flags); 609 spin_lock_irqsave(&i2400m->rx_lock, flags);
610 ack_skb = i2400m->ack_skb; 610 ack_skb = i2400m->ack_skb;
611 if (ack_skb && !IS_ERR(ack_skb)) 611 if (ack_skb && !IS_ERR(ack_skb))
612 kfree(ack_skb); 612 kfree_skb(ack_skb);
613 i2400m->ack_skb = ERR_PTR(code); 613 i2400m->ack_skb = ERR_PTR(code);
614 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 614 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
615} 615}
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index 074cc1f89853..a314799967cf 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -184,6 +184,8 @@ void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
184 * NOTE: this function might realloc the skb (if it is too small), 184 * NOTE: this function might realloc the skb (if it is too small),
185 * so always update with the one returned. 185 * so always update with the one returned.
186 * ERR_PTR() is < 0 on error. 186 * ERR_PTR() is < 0 on error.
187 * Will return NULL if it cannot reallocate -- this can be
188 * considered a transient retryable error.
187 */ 189 */
188static 190static
189struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb) 191struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
@@ -243,8 +245,8 @@ retry:
243 if (printk_ratelimit()) 245 if (printk_ratelimit())
244 dev_err(dev, "RX: Can't reallocate skb to %d; " 246 dev_err(dev, "RX: Can't reallocate skb to %d; "
245 "RX dropped\n", rx_size); 247 "RX dropped\n", rx_size);
246 kfree(rx_skb); 248 kfree_skb(rx_skb);
247 result = 0; 249 rx_skb = NULL;
248 goto out; /* drop it...*/ 250 goto out; /* drop it...*/
249 } 251 }
250 kfree_skb(rx_skb); 252 kfree_skb(rx_skb);
@@ -344,7 +346,8 @@ int i2400mu_rxd(void *_i2400mu)
344 if (IS_ERR(rx_skb)) 346 if (IS_ERR(rx_skb))
345 goto out; 347 goto out;
346 atomic_dec(&i2400mu->rx_pending_count); 348 atomic_dec(&i2400mu->rx_pending_count);
347 if (rx_skb->len == 0) { /* some ignorable condition */ 349 if (rx_skb == NULL || rx_skb->len == 0) {
350 /* some "ignorable" condition */
348 kfree_skb(rx_skb); 351 kfree_skb(rx_skb);
349 continue; 352 continue;
350 } 353 }
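Both i2400m hunks above swap kfree() for kfree_skb(): an sk_buff is reference counted and owns a separately allocated data area, so it must be released through the skb API; plain kfree() on the struct leaks skb->head and ignores any other holders of the buffer. A tiny stand-alone reminder of the correct call (hypothetical function, not driver code):

#include <linux/skbuff.h>

static void example_discard(struct sk_buff *skb)
{
        /*
         * kfree_skb() drops one reference and, when the count reaches
         * zero, frees both the sk_buff and its data area. It also
         * tolerates a NULL pointer, which the usb-rx.c change relies on.
         */
        kfree_skb(skb);
}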
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index ea543fcf2687..e4f9f747de88 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -111,7 +111,7 @@ config WLAN_80211
111 lets you choose drivers. 111 lets you choose drivers.
112 112
113config PCMCIA_RAYCS 113config PCMCIA_RAYCS
114 tristate "Aviator/Raytheon 2.4MHz wireless support" 114 tristate "Aviator/Raytheon 2.4GHz wireless support"
115 depends on PCMCIA && WLAN_80211 115 depends on PCMCIA && WLAN_80211
116 select WIRELESS_EXT 116 select WIRELESS_EXT
117 ---help--- 117 ---help---
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 4af2607deec0..8ef87356e083 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -2644,7 +2644,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2644 if (skb_headroom(skb) < padsize) { 2644 if (skb_headroom(skb) < padsize) {
2645 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" 2645 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2646 " headroom to pad %d\n", hdrlen, padsize); 2646 " headroom to pad %d\n", hdrlen, padsize);
2647 return -1; 2647 return NETDEV_TX_BUSY;
2648 } 2648 }
2649 skb_push(skb, padsize); 2649 skb_push(skb, padsize);
2650 memmove(skb->data, skb->data+padsize, hdrlen); 2650 memmove(skb->data, skb->data+padsize, hdrlen);
@@ -2655,7 +2655,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2655 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2655 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
2656 spin_unlock_irqrestore(&sc->txbuflock, flags); 2656 spin_unlock_irqrestore(&sc->txbuflock, flags);
2657 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2657 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
2658 return -1; 2658 return NETDEV_TX_BUSY;
2659 } 2659 }
2660 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2660 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
2661 list_del(&bf->list); 2661 list_del(&bf->list);
@@ -2673,10 +2673,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2673 sc->txbuf_len++; 2673 sc->txbuf_len++;
2674 spin_unlock_irqrestore(&sc->txbuflock, flags); 2674 spin_unlock_irqrestore(&sc->txbuflock, flags);
2675 dev_kfree_skb_any(skb); 2675 dev_kfree_skb_any(skb);
2676 return 0; 2676 return NETDEV_TX_OK;
2677 } 2677 }
2678 2678
2679 return 0; 2679 return NETDEV_TX_OK;
2680} 2680}
2681 2681
2682static int 2682static int
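The ath5k_tx() returns above move from raw 0/-1 to the NETDEV_TX_* codes. The usual convention for a tx handler is: NETDEV_TX_OK once the driver has taken ownership of the skb (even if it then has to drop it), and NETDEV_TX_BUSY only when the frame was not consumed, so the stack may retry it later. A rough skeleton of that shape, with hypothetical types and fields (not ath5k code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver state, for illustration only. */
struct example_hw {
        int queue_len;
        int queue_max;
};

static int example_tx(struct example_hw *hw, struct sk_buff *skb)
{
        if (hw->queue_len >= hw->queue_max)
                return NETDEV_TX_BUSY;  /* not consumed: caller keeps the skb and may retry */

        /* ...DMA-map the frame and hand it to hardware here... */
        hw->queue_len++;

        return NETDEV_TX_OK;            /* consumed: the driver now owns (and will free) the skb */
}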
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
index 0cac05c6a9ce..75eb9f43c741 100644
--- a/drivers/net/wireless/ath5k/pcu.c
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -65,7 +65,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
65 if (ah->ah_version == AR5K_AR5210) 65 if (ah->ah_version == AR5K_AR5210)
66 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; 66 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
67 else 67 else
68 AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC); 68 AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
69 break; 69 break;
70 70
71 case NL80211_IFTYPE_AP: 71 case NL80211_IFTYPE_AP:
@@ -75,7 +75,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
75 if (ah->ah_version == AR5K_AR5210) 75 if (ah->ah_version == AR5K_AR5210)
76 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; 76 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
77 else 77 else
78 AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC); 78 AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
79 break; 79 break;
80 80
81 case NL80211_IFTYPE_STATION: 81 case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 91aaeaf88199..9189ab13286c 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -73,7 +73,7 @@
73#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */ 73#define AR5K_CFG_SWRD 0x00000004 /* Byte-swap RX descriptor */
74#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */ 74#define AR5K_CFG_SWRB 0x00000008 /* Byte-swap RX buffer */
75#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */ 75#define AR5K_CFG_SWRG 0x00000010 /* Byte-swap Register access */
76#define AR5K_CFG_ADHOC 0x00000020 /* AP/Adhoc indication [5211+] */ 76#define AR5K_CFG_IBSS 0x00000020 /* 0-BSS, 1-IBSS [5211+] */
77#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */ 77#define AR5K_CFG_PHY_OK 0x00000100 /* [5211+] */
78#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */ 78#define AR5K_CFG_EEBS 0x00000200 /* EEPROM is busy */
79#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */ 79#define AR5K_CFG_CLKGD 0x00000400 /* Clock gated (Disable dynamic clock) */
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index c43bd321f97f..90a8dd873786 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -1,6 +1,7 @@
1config ATH9K 1config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211 && WLAN_80211
4 depends on RFKILL || RFKILL=n
4 select MAC80211_LEDS 5 select MAC80211_LEDS
5 select LEDS_CLASS 6 select LEDS_CLASS
6 select NEW_LEDS 7 select NEW_LEDS
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 191eec50dc75..727f067aca4f 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -2164,13 +2164,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2164 conf->ht.channel_type); 2164 conf->ht.channel_type);
2165 } 2165 }
2166 2166
2167 ath_update_chainmask(sc, conf->ht.enabled);
2168
2167 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) { 2169 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) {
2168 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n"); 2170 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2169 mutex_unlock(&sc->mutex); 2171 mutex_unlock(&sc->mutex);
2170 return -EINVAL; 2172 return -EINVAL;
2171 } 2173 }
2172
2173 ath_update_chainmask(sc, conf->ht.enabled);
2174 } 2174 }
2175 2175
2176 if (changed & IEEE80211_CONF_CHANGE_POWER) 2176 if (changed & IEEE80211_CONF_CHANGE_POWER)
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 04ab457a8faa..1b71b934bb5e 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -490,7 +490,7 @@ static inline int ath_rc_get_nextvalid_txrate(struct ath_rate_table *rate_table,
490 490
491static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) 491static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
492{ 492{
493 if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG)) 493 if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
494 return 0; 494 return 0;
495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
496 return 0; 496 return 0;
diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h
index 9112c030b1e8..6df1b3b77c25 100644
--- a/drivers/net/wireless/ath9k/regd_common.h
+++ b/drivers/net/wireless/ath9k/regd_common.h
@@ -228,7 +228,7 @@ enum {
228}; 228};
229 229
230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ 230#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \
231 (!(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB))) 231 (~(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB)))
232#define REG_DOMAIN_5GHZ_MASK REQ_MASK 232#define REG_DOMAIN_5GHZ_MASK REQ_MASK
233 233
234static struct reg_dmn_pair_mapping regDomainPairs[] = { 234static struct reg_dmn_pair_mapping regDomainPairs[] = {
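The last two hunks fix sibling operator slips: rc.c used a bitwise & where a logical && was intended, and REG_DOMAIN_2GHZ_MASK used a logical ! where a bitwise ~ was intended, which collapses the expression to zero instead of producing a mask with the ad-hoc bits cleared. The mask case in miniature, with hypothetical flag values:

#include <stdio.h>

#define ADHOC_NO_11A            0x0010  /* hypothetical flag values */
#define DISALLOW_ADHOC_11A      0x0080

int main(void)
{
        unsigned int req_mask = 0xffff;

        /* Logical NOT turns the nonzero flag word into 0, so the result is 0x0000. */
        printf("with ! : 0x%04x\n", req_mask & !(ADHOC_NO_11A | DISALLOW_ADHOC_11A));

        /* Bitwise NOT keeps every bit except the two being excluded: 0xff6f. */
        printf("with ~ : 0x%04x\n", req_mask & ~(ADHOC_NO_11A | DISALLOW_ADHOC_11A));
        return 0;
}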
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 3bfc3b90f256..c92f0c6e4adc 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -126,15 +126,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
126 tx_info->flags |= IEEE80211_TX_STAT_ACK; 126 tx_info->flags |= IEEE80211_TX_STAT_ACK;
127 } 127 }
128 128
129 tx_info->status.rates[0].count = tx_status->retries; 129 tx_info->status.rates[0].count = tx_status->retries + 1;
130 if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
131 /* Change idx from internal table index to MCS index */
132 int idx = tx_info->status.rates[0].idx;
133 struct ath_rate_table *rate_table = sc->cur_rate_table;
134 if (idx >= 0 && idx < rate_table->rate_cnt)
135 tx_info->status.rates[0].idx =
136 rate_table->info[idx].ratecode & 0x7f;
137 }
138 130
139 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 131 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
140 padsize = hdrlen & 3; 132 padsize = hdrlen & 3;
@@ -264,25 +256,22 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
264 } 256 }
265 257
266 /* Get seqno */ 258 /* Get seqno */
267 259 /* For HT capable stations, we save tidno for later use.
268 if (ieee80211_is_data(fc) && !is_pae(skb)) { 260 * We also override seqno set by upper layer with the one
269 /* For HT capable stations, we save tidno for later use. 261 * in tx aggregation state.
270 * We also override seqno set by upper layer with the one 262 *
271 * in tx aggregation state. 263 * If fragmentation is on, the sequence number is
272 * 264 * not overridden, since it has been
273 * If fragmentation is on, the sequence number is 265 * incremented by the fragmentation routine.
274 * not overridden, since it has been 266 *
275 * incremented by the fragmentation routine. 267 * FIXME: check if the fragmentation threshold exceeds
276 * 268 * IEEE80211 max.
277 * FIXME: check if the fragmentation threshold exceeds 269 */
278 * IEEE80211 max. 270 tid = ATH_AN_2_TID(an, bf->bf_tidno);
279 */ 271 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
280 tid = ATH_AN_2_TID(an, bf->bf_tidno); 272 IEEE80211_SEQ_SEQ_SHIFT);
281 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << 273 bf->bf_seqno = tid->seq_next;
282 IEEE80211_SEQ_SEQ_SHIFT); 274 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
283 bf->bf_seqno = tid->seq_next;
284 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
285 }
286} 275}
287 276
288static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, 277static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
@@ -1718,11 +1707,10 @@ static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
1718 1707
1719 /* Assign seqno, tidno */ 1708 /* Assign seqno, tidno */
1720 1709
1721 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR)) 1710 if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
1722 assign_aggr_tid_seqno(skb, bf); 1711 assign_aggr_tid_seqno(skb, bf);
1723 1712
1724 /* DMA setup */ 1713 /* DMA setup */
1725
1726 bf->bf_mpdu = skb; 1714 bf->bf_mpdu = skb;
1727 1715
1728 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data, 1716 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7b31a327b24a..c788bad10661 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3261,7 +3261,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
3261 struct b43_wldev *down_dev; 3261 struct b43_wldev *down_dev;
3262 struct b43_wldev *d; 3262 struct b43_wldev *d;
3263 int err; 3263 int err;
3264 bool gmode; 3264 bool uninitialized_var(gmode);
3265 int prev_status; 3265 int prev_status;
3266 3266
3267 /* Find a device and PHY which supports the band. */ 3267 /* Find a device and PHY which supports the band. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c1324e31d2f6..fb996c27a19b 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2465,7 +2465,7 @@ static void b43legacy_put_phy_into_reset(struct b43legacy_wldev *dev)
2465static int b43legacy_switch_phymode(struct b43legacy_wl *wl, 2465static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
2466 unsigned int new_mode) 2466 unsigned int new_mode)
2467{ 2467{
2468 struct b43legacy_wldev *up_dev; 2468 struct b43legacy_wldev *uninitialized_var(up_dev);
2469 struct b43legacy_wldev *down_dev; 2469 struct b43legacy_wldev *down_dev;
2470 int err; 2470 int err;
2471 bool gmode = 0; 2471 bool gmode = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 9b60a0c5de5f..21c841847d88 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -638,12 +638,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
638 s8 scale_action = 0; 638 s8 scale_action = 0;
639 unsigned long flags; 639 unsigned long flags;
640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
641 u16 fc, rate_mask; 641 u16 fc;
642 u16 rate_mask = 0;
642 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 643 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 644 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
644 645
645 IWL_DEBUG_RATE("enter\n"); 646 IWL_DEBUG_RATE("enter\n");
646 647
648 if (sta)
649 rate_mask = sta->supp_rates[sband->band];
650
647 /* Send management frames and broadcast/multicast data using lowest 651 /* Send management frames and broadcast/multicast data using lowest
648 * rate. */ 652 * rate. */
649 fc = le16_to_cpu(hdr->frame_control); 653 fc = le16_to_cpu(hdr->frame_control);
@@ -651,11 +655,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
651 is_multicast_ether_addr(hdr->addr1) || 655 is_multicast_ether_addr(hdr->addr1) ||
652 !sta || !priv_sta) { 656 !sta || !priv_sta) {
653 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 657 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
654 info->control.rates[0].idx = rate_lowest_index(sband, sta); 658 if (!rate_mask)
659 info->control.rates[0].idx =
660 rate_lowest_index(sband, NULL);
661 else
662 info->control.rates[0].idx =
663 rate_lowest_index(sband, sta);
655 return; 664 return;
656 } 665 }
657 666
658 rate_mask = sta->supp_rates[sband->band];
659 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 667 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
660 668
661 if (sband->band == IEEE80211_BAND_5GHZ) 669 if (sband->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8fdb34222c0a..45cfa1cf194a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2219,7 +2219,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl3945_priv *priv)
2219 /* set tx power value for all OFDM rates */ 2219 /* set tx power value for all OFDM rates */
2220 for (rate_index = 0; rate_index < IWL_OFDM_RATES; 2220 for (rate_index = 0; rate_index < IWL_OFDM_RATES;
2221 rate_index++) { 2221 rate_index++) {
2222 s32 power_idx; 2222 s32 uninitialized_var(power_idx);
2223 int rc; 2223 int rc;
2224 2224
2225 /* use channel group's clip-power table, 2225 /* use channel group's clip-power table,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index f3f17929ca0b..27f50471aed8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -944,7 +944,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
944 } 944 }
945 945
946 /* See if there's a better rate or modulation mode to try. */ 946 /* See if there's a better rate or modulation mode to try. */
947 rs_rate_scale_perform(priv, hdr, sta, lq_sta); 947 if (sta && sta->supp_rates[sband->band])
948 rs_rate_scale_perform(priv, hdr, sta, lq_sta);
948out: 949out:
949 return; 950 return;
950} 951}
@@ -2101,14 +2102,23 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2102 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2102 struct iwl_lq_sta *lq_sta = priv_sta; 2103 struct iwl_lq_sta *lq_sta = priv_sta;
2103 int rate_idx; 2104 int rate_idx;
2105 u64 mask_bit = 0;
2104 2106
2105 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2107 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2106 2108
2109 if (sta)
2110 mask_bit = sta->supp_rates[sband->band];
2111
2107 /* Send management frames and broadcast/multicast data using lowest 2112 /* Send management frames and broadcast/multicast data using lowest
2108 * rate. */ 2113 * rate. */
2109 if (!ieee80211_is_data(hdr->frame_control) || 2114 if (!ieee80211_is_data(hdr->frame_control) ||
2110 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) { 2115 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) {
2111 info->control.rates[0].idx = rate_lowest_index(sband, sta); 2116 if (!mask_bit)
2117 info->control.rates[0].idx =
2118 rate_lowest_index(sband, NULL);
2119 else
2120 info->control.rates[0].idx =
2121 rate_lowest_index(sband, sta);
2112 return; 2122 return;
2113 } 2123 }
2114 2124
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 5da6b35cd26d..0dc8eed16404 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2482,7 +2482,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2482 dev_kfree_skb_any(skb); 2482 dev_kfree_skb_any(skb);
2483 2483
2484 IWL_DEBUG_MACDUMP("leave\n"); 2484 IWL_DEBUG_MACDUMP("leave\n");
2485 return 0; 2485 return NETDEV_TX_OK;
2486} 2486}
2487 2487
2488static int iwl_mac_add_interface(struct ieee80211_hw *hw, 2488static int iwl_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 52966ffbef6e..ba997204c8d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -255,7 +255,7 @@ struct iwl_cmd_header {
255 * 0x3) 54 Mbps 255 * 0x3) 54 Mbps
256 * 256 *
257 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): 257 * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
258 * 3-0: 10) 1 Mbps 258 * 6-0: 10) 1 Mbps
259 * 20) 2 Mbps 259 * 20) 2 Mbps
260 * 55) 5.5 Mbps 260 * 55) 5.5 Mbps
261 * 110) 11 Mbps 261 * 110) 11 Mbps
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 01a2169cecec..4b35b30e493e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -51,6 +51,7 @@ const char *get_cmd_string(u8 cmd)
51 IWL_CMD(REPLY_REMOVE_STA); 51 IWL_CMD(REPLY_REMOVE_STA);
52 IWL_CMD(REPLY_REMOVE_ALL_STA); 52 IWL_CMD(REPLY_REMOVE_ALL_STA);
53 IWL_CMD(REPLY_WEPKEY); 53 IWL_CMD(REPLY_WEPKEY);
54 IWL_CMD(REPLY_3945_RX);
54 IWL_CMD(REPLY_TX); 55 IWL_CMD(REPLY_TX);
55 IWL_CMD(REPLY_RATE_SCALE); 56 IWL_CMD(REPLY_RATE_SCALE);
56 IWL_CMD(REPLY_LEDS_CMD); 57 IWL_CMD(REPLY_LEDS_CMD);
@@ -223,7 +224,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
223 IWL_ERROR("Error: Response NULL in '%s'\n", 224 IWL_ERROR("Error: Response NULL in '%s'\n",
224 get_cmd_string(cmd->id)); 225 get_cmd_string(cmd->id));
225 ret = -EIO; 226 ret = -EIO;
226 goto out; 227 goto cancel;
227 } 228 }
228 229
229 ret = 0; 230 ret = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d64580805d6e..95d01984c80e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -745,7 +745,7 @@ static int iwl3945_send_cmd_sync(struct iwl3945_priv *priv, struct iwl3945_host_
745 IWL_ERROR("Error: Response NULL in '%s'\n", 745 IWL_ERROR("Error: Response NULL in '%s'\n",
746 get_cmd_string(cmd->id)); 746 get_cmd_string(cmd->id));
747 ret = -EIO; 747 ret = -EIO;
748 goto out; 748 goto cancel;
749 } 749 }
750 750
751 ret = 0; 751 ret = 0;
@@ -6538,7 +6538,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
6538 dev_kfree_skb_any(skb); 6538 dev_kfree_skb_any(skb);
6539 6539
6540 IWL_DEBUG_MAC80211("leave\n"); 6540 IWL_DEBUG_MAC80211("leave\n");
6541 return 0; 6541 return NETDEV_TX_OK;
6542} 6542}
6543 6543
6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw, 6544static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index e173b1b46c23..f6a79a653b7b 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -32,7 +32,7 @@ struct txpd {
32 u8 pktdelay_2ms; 32 u8 pktdelay_2ms;
33 /* reserved */ 33 /* reserved */
34 u8 reserved1; 34 u8 reserved1;
35}; 35} __attribute__ ((packed));
36 36
37/* RxPD Descriptor */ 37/* RxPD Descriptor */
38struct rxpd { 38struct rxpd {
@@ -63,7 +63,7 @@ struct rxpd {
63 /* Pkt Priority */ 63 /* Pkt Priority */
64 u8 priority; 64 u8 priority;
65 u8 reserved[3]; 65 u8 reserved[3];
66}; 66} __attribute__ ((packed));
67 67
68struct cmd_header { 68struct cmd_header {
69 __le16 command; 69 __le16 command;
@@ -97,7 +97,7 @@ struct enc_key {
97struct lbs_offset_value { 97struct lbs_offset_value {
98 u32 offset; 98 u32 offset;
99 u32 value; 99 u32 value;
100}; 100} __attribute__ ((packed));
101 101
102/* Define general data structure */ 102/* Define general data structure */
103/* cmd_DS_GEN */ 103/* cmd_DS_GEN */
@@ -107,7 +107,7 @@ struct cmd_ds_gen {
107 __le16 seqnum; 107 __le16 seqnum;
108 __le16 result; 108 __le16 result;
109 void *cmdresp[0]; 109 void *cmdresp[0];
110}; 110} __attribute__ ((packed));
111 111
112#define S_DS_GEN sizeof(struct cmd_ds_gen) 112#define S_DS_GEN sizeof(struct cmd_ds_gen)
113 113
@@ -163,7 +163,7 @@ struct cmd_ds_802_11_subscribe_event {
163 * bump this up a bit. 163 * bump this up a bit.
164 */ 164 */
165 uint8_t tlv[128]; 165 uint8_t tlv[128];
166}; 166} __attribute__ ((packed));
167 167
168/* 168/*
169 * This scan handle Country Information IE(802.11d compliant) 169 * This scan handle Country Information IE(802.11d compliant)
@@ -180,7 +180,7 @@ struct cmd_ds_802_11_scan {
180 mrvlietypes_chanlistparamset_t ChanListParamSet; 180 mrvlietypes_chanlistparamset_t ChanListParamSet;
181 mrvlietypes_ratesparamset_t OpRateSet; 181 mrvlietypes_ratesparamset_t OpRateSet;
182#endif 182#endif
183}; 183} __attribute__ ((packed));
184 184
185struct cmd_ds_802_11_scan_rsp { 185struct cmd_ds_802_11_scan_rsp {
186 struct cmd_header hdr; 186 struct cmd_header hdr;
@@ -188,7 +188,7 @@ struct cmd_ds_802_11_scan_rsp {
188 __le16 bssdescriptsize; 188 __le16 bssdescriptsize;
189 uint8_t nr_sets; 189 uint8_t nr_sets;
190 uint8_t bssdesc_and_tlvbuffer[0]; 190 uint8_t bssdesc_and_tlvbuffer[0];
191}; 191} __attribute__ ((packed));
192 192
193struct cmd_ds_802_11_get_log { 193struct cmd_ds_802_11_get_log {
194 struct cmd_header hdr; 194 struct cmd_header hdr;
@@ -206,33 +206,33 @@ struct cmd_ds_802_11_get_log {
206 __le32 fcserror; 206 __le32 fcserror;
207 __le32 txframe; 207 __le32 txframe;
208 __le32 wepundecryptable; 208 __le32 wepundecryptable;
209}; 209} __attribute__ ((packed));
210 210
211struct cmd_ds_mac_control { 211struct cmd_ds_mac_control {
212 struct cmd_header hdr; 212 struct cmd_header hdr;
213 __le16 action; 213 __le16 action;
214 u16 reserved; 214 u16 reserved;
215}; 215} __attribute__ ((packed));
216 216
217struct cmd_ds_mac_multicast_adr { 217struct cmd_ds_mac_multicast_adr {
218 struct cmd_header hdr; 218 struct cmd_header hdr;
219 __le16 action; 219 __le16 action;
220 __le16 nr_of_adrs; 220 __le16 nr_of_adrs;
221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE]; 221 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
222}; 222} __attribute__ ((packed));
223 223
224struct cmd_ds_802_11_authenticate { 224struct cmd_ds_802_11_authenticate {
225 u8 macaddr[ETH_ALEN]; 225 u8 macaddr[ETH_ALEN];
226 u8 authtype; 226 u8 authtype;
227 u8 reserved[10]; 227 u8 reserved[10];
228}; 228} __attribute__ ((packed));
229 229
230struct cmd_ds_802_11_deauthenticate { 230struct cmd_ds_802_11_deauthenticate {
231 struct cmd_header hdr; 231 struct cmd_header hdr;
232 232
233 u8 macaddr[ETH_ALEN]; 233 u8 macaddr[ETH_ALEN];
234 __le16 reasoncode; 234 __le16 reasoncode;
235}; 235} __attribute__ ((packed));
236 236
237struct cmd_ds_802_11_associate { 237struct cmd_ds_802_11_associate {
238 u8 peerstaaddr[6]; 238 u8 peerstaaddr[6];
@@ -251,7 +251,7 @@ struct cmd_ds_802_11_associate {
251 251
252struct cmd_ds_802_11_associate_rsp { 252struct cmd_ds_802_11_associate_rsp {
253 struct ieeetypes_assocrsp assocRsp; 253 struct ieeetypes_assocrsp assocRsp;
254}; 254} __attribute__ ((packed));
255 255
256struct cmd_ds_802_11_set_wep { 256struct cmd_ds_802_11_set_wep {
257 struct cmd_header hdr; 257 struct cmd_header hdr;
@@ -265,7 +265,7 @@ struct cmd_ds_802_11_set_wep {
265 /* 40, 128bit or TXWEP */ 265 /* 40, 128bit or TXWEP */
266 uint8_t keytype[4]; 266 uint8_t keytype[4];
267 uint8_t keymaterial[4][16]; 267 uint8_t keymaterial[4][16];
268}; 268} __attribute__ ((packed));
269 269
270struct cmd_ds_802_3_get_stat { 270struct cmd_ds_802_3_get_stat {
271 __le32 xmitok; 271 __le32 xmitok;
@@ -274,7 +274,7 @@ struct cmd_ds_802_3_get_stat {
274 __le32 rcverror; 274 __le32 rcverror;
275 __le32 rcvnobuffer; 275 __le32 rcvnobuffer;
276 __le32 rcvcrcerror; 276 __le32 rcvcrcerror;
277}; 277} __attribute__ ((packed));
278 278
279struct cmd_ds_802_11_get_stat { 279struct cmd_ds_802_11_get_stat {
280 __le32 txfragmentcnt; 280 __le32 txfragmentcnt;
@@ -294,7 +294,7 @@ struct cmd_ds_802_11_get_stat {
294 __le32 txbeacon; 294 __le32 txbeacon;
295 __le32 rxbeacon; 295 __le32 rxbeacon;
296 __le32 wepundecryptable; 296 __le32 wepundecryptable;
297}; 297} __attribute__ ((packed));
298 298
299struct cmd_ds_802_11_snmp_mib { 299struct cmd_ds_802_11_snmp_mib {
300 struct cmd_header hdr; 300 struct cmd_header hdr;
@@ -303,58 +303,58 @@ struct cmd_ds_802_11_snmp_mib {
303 __le16 oid; 303 __le16 oid;
304 __le16 bufsize; 304 __le16 bufsize;
305 u8 value[128]; 305 u8 value[128];
306}; 306} __attribute__ ((packed));
307 307
308struct cmd_ds_mac_reg_map { 308struct cmd_ds_mac_reg_map {
309 __le16 buffersize; 309 __le16 buffersize;
310 u8 regmap[128]; 310 u8 regmap[128];
311 __le16 reserved; 311 __le16 reserved;
312}; 312} __attribute__ ((packed));
313 313
314struct cmd_ds_bbp_reg_map { 314struct cmd_ds_bbp_reg_map {
315 __le16 buffersize; 315 __le16 buffersize;
316 u8 regmap[128]; 316 u8 regmap[128];
317 __le16 reserved; 317 __le16 reserved;
318}; 318} __attribute__ ((packed));
319 319
320struct cmd_ds_rf_reg_map { 320struct cmd_ds_rf_reg_map {
321 __le16 buffersize; 321 __le16 buffersize;
322 u8 regmap[64]; 322 u8 regmap[64];
323 __le16 reserved; 323 __le16 reserved;
324}; 324} __attribute__ ((packed));
325 325
326struct cmd_ds_mac_reg_access { 326struct cmd_ds_mac_reg_access {
327 __le16 action; 327 __le16 action;
328 __le16 offset; 328 __le16 offset;
329 __le32 value; 329 __le32 value;
330}; 330} __attribute__ ((packed));
331 331
332struct cmd_ds_bbp_reg_access { 332struct cmd_ds_bbp_reg_access {
333 __le16 action; 333 __le16 action;
334 __le16 offset; 334 __le16 offset;
335 u8 value; 335 u8 value;
336 u8 reserved[3]; 336 u8 reserved[3];
337}; 337} __attribute__ ((packed));
338 338
339struct cmd_ds_rf_reg_access { 339struct cmd_ds_rf_reg_access {
340 __le16 action; 340 __le16 action;
341 __le16 offset; 341 __le16 offset;
342 u8 value; 342 u8 value;
343 u8 reserved[3]; 343 u8 reserved[3];
344}; 344} __attribute__ ((packed));
345 345
346struct cmd_ds_802_11_radio_control { 346struct cmd_ds_802_11_radio_control {
347 struct cmd_header hdr; 347 struct cmd_header hdr;
348 348
349 __le16 action; 349 __le16 action;
350 __le16 control; 350 __le16 control;
351}; 351} __attribute__ ((packed));
352 352
353struct cmd_ds_802_11_beacon_control { 353struct cmd_ds_802_11_beacon_control {
354 __le16 action; 354 __le16 action;
355 __le16 beacon_enable; 355 __le16 beacon_enable;
356 __le16 beacon_period; 356 __le16 beacon_period;
357}; 357} __attribute__ ((packed));
358 358
359struct cmd_ds_802_11_sleep_params { 359struct cmd_ds_802_11_sleep_params {
360 struct cmd_header hdr; 360 struct cmd_header hdr;
@@ -379,7 +379,7 @@ struct cmd_ds_802_11_sleep_params {
379 379
380 /* reserved field, should be set to zero */ 380 /* reserved field, should be set to zero */
381 __le16 reserved; 381 __le16 reserved;
382}; 382} __attribute__ ((packed));
383 383
384struct cmd_ds_802_11_inactivity_timeout { 384struct cmd_ds_802_11_inactivity_timeout {
385 struct cmd_header hdr; 385 struct cmd_header hdr;
@@ -389,7 +389,7 @@ struct cmd_ds_802_11_inactivity_timeout {
389 389
390 /* Inactivity timeout in msec */ 390 /* Inactivity timeout in msec */
391 __le16 timeout; 391 __le16 timeout;
392}; 392} __attribute__ ((packed));
393 393
394struct cmd_ds_802_11_rf_channel { 394struct cmd_ds_802_11_rf_channel {
395 struct cmd_header hdr; 395 struct cmd_header hdr;
@@ -399,7 +399,7 @@ struct cmd_ds_802_11_rf_channel {
399 __le16 rftype; /* unused */ 399 __le16 rftype; /* unused */
400 __le16 reserved; /* unused */ 400 __le16 reserved; /* unused */
401 u8 channellist[32]; /* unused */ 401 u8 channellist[32]; /* unused */
402}; 402} __attribute__ ((packed));
403 403
404struct cmd_ds_802_11_rssi { 404struct cmd_ds_802_11_rssi {
405 /* weighting factor */ 405 /* weighting factor */
@@ -408,21 +408,21 @@ struct cmd_ds_802_11_rssi {
408 __le16 reserved_0; 408 __le16 reserved_0;
409 __le16 reserved_1; 409 __le16 reserved_1;
410 __le16 reserved_2; 410 __le16 reserved_2;
411}; 411} __attribute__ ((packed));
412 412
413struct cmd_ds_802_11_rssi_rsp { 413struct cmd_ds_802_11_rssi_rsp {
414 __le16 SNR; 414 __le16 SNR;
415 __le16 noisefloor; 415 __le16 noisefloor;
416 __le16 avgSNR; 416 __le16 avgSNR;
417 __le16 avgnoisefloor; 417 __le16 avgnoisefloor;
418}; 418} __attribute__ ((packed));
419 419
420struct cmd_ds_802_11_mac_address { 420struct cmd_ds_802_11_mac_address {
421 struct cmd_header hdr; 421 struct cmd_header hdr;
422 422
423 __le16 action; 423 __le16 action;
424 u8 macadd[ETH_ALEN]; 424 u8 macadd[ETH_ALEN];
425}; 425} __attribute__ ((packed));
426 426
427struct cmd_ds_802_11_rf_tx_power { 427struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr; 428 struct cmd_header hdr;
@@ -431,7 +431,7 @@ struct cmd_ds_802_11_rf_tx_power {
431 __le16 curlevel; 431 __le16 curlevel;
432 s8 maxlevel; 432 s8 maxlevel;
433 s8 minlevel; 433 s8 minlevel;
434}; 434} __attribute__ ((packed));
435 435
436struct cmd_ds_802_11_rf_antenna { 436struct cmd_ds_802_11_rf_antenna {
437 __le16 action; 437 __le16 action;
@@ -439,33 +439,33 @@ struct cmd_ds_802_11_rf_antenna {
439 /* Number of antennas or 0xffff(diversity) */ 439 /* Number of antennas or 0xffff(diversity) */
440 __le16 antennamode; 440 __le16 antennamode;
441 441
442}; 442} __attribute__ ((packed));
443 443
444struct cmd_ds_802_11_monitor_mode { 444struct cmd_ds_802_11_monitor_mode {
445 __le16 action; 445 __le16 action;
446 __le16 mode; 446 __le16 mode;
447}; 447} __attribute__ ((packed));
448 448
449struct cmd_ds_set_boot2_ver { 449struct cmd_ds_set_boot2_ver {
450 struct cmd_header hdr; 450 struct cmd_header hdr;
451 451
452 __le16 action; 452 __le16 action;
453 __le16 version; 453 __le16 version;
454}; 454} __attribute__ ((packed));
455 455
456struct cmd_ds_802_11_fw_wake_method { 456struct cmd_ds_802_11_fw_wake_method {
457 struct cmd_header hdr; 457 struct cmd_header hdr;
458 458
459 __le16 action; 459 __le16 action;
460 __le16 method; 460 __le16 method;
461}; 461} __attribute__ ((packed));
462 462
463struct cmd_ds_802_11_sleep_period { 463struct cmd_ds_802_11_sleep_period {
464 struct cmd_header hdr; 464 struct cmd_header hdr;
465 465
466 __le16 action; 466 __le16 action;
467 __le16 period; 467 __le16 period;
468}; 468} __attribute__ ((packed));
469 469
470struct cmd_ds_802_11_ps_mode { 470struct cmd_ds_802_11_ps_mode {
471 __le16 action; 471 __le16 action;
@@ -473,7 +473,7 @@ struct cmd_ds_802_11_ps_mode {
473 __le16 multipledtim; 473 __le16 multipledtim;
474 __le16 reserved; 474 __le16 reserved;
475 __le16 locallisteninterval; 475 __le16 locallisteninterval;
476}; 476} __attribute__ ((packed));
477 477
478struct cmd_confirm_sleep { 478struct cmd_confirm_sleep {
479 struct cmd_header hdr; 479 struct cmd_header hdr;
@@ -483,7 +483,7 @@ struct cmd_confirm_sleep {
483 __le16 multipledtim; 483 __le16 multipledtim;
484 __le16 reserved; 484 __le16 reserved;
485 __le16 locallisteninterval; 485 __le16 locallisteninterval;
486}; 486} __attribute__ ((packed));
487 487
488struct cmd_ds_802_11_data_rate { 488struct cmd_ds_802_11_data_rate {
489 struct cmd_header hdr; 489 struct cmd_header hdr;
@@ -491,14 +491,14 @@ struct cmd_ds_802_11_data_rate {
491 __le16 action; 491 __le16 action;
492 __le16 reserved; 492 __le16 reserved;
493 u8 rates[MAX_RATES]; 493 u8 rates[MAX_RATES];
494}; 494} __attribute__ ((packed));
495 495
496struct cmd_ds_802_11_rate_adapt_rateset { 496struct cmd_ds_802_11_rate_adapt_rateset {
497 struct cmd_header hdr; 497 struct cmd_header hdr;
498 __le16 action; 498 __le16 action;
499 __le16 enablehwauto; 499 __le16 enablehwauto;
500 __le16 bitmap; 500 __le16 bitmap;
501}; 501} __attribute__ ((packed));
502 502
503struct cmd_ds_802_11_ad_hoc_start { 503struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr; 504 struct cmd_header hdr;
@@ -520,7 +520,7 @@ struct cmd_ds_802_11_ad_hoc_result {
520 520
521 u8 pad[3]; 521 u8 pad[3];
522 u8 bssid[ETH_ALEN]; 522 u8 bssid[ETH_ALEN];
523}; 523} __attribute__ ((packed));
524 524
525struct adhoc_bssdesc { 525struct adhoc_bssdesc {
526 u8 bssid[ETH_ALEN]; 526 u8 bssid[ETH_ALEN];
@@ -578,7 +578,7 @@ struct MrvlIEtype_keyParamSet {
578 578
579 /* key material of size keylen */ 579 /* key material of size keylen */
580 u8 key[32]; 580 u8 key[32];
581}; 581} __attribute__ ((packed));
582 582
583#define MAX_WOL_RULES 16 583#define MAX_WOL_RULES 16
584 584
@@ -590,7 +590,7 @@ struct host_wol_rule {
590 __le16 reserve; 590 __le16 reserve;
591 __be32 sig_mask; 591 __be32 sig_mask;
592 __be32 signature; 592 __be32 signature;
593}; 593} __attribute__ ((packed));
594 594
595struct wol_config { 595struct wol_config {
596 uint8_t action; 596 uint8_t action;
@@ -598,8 +598,7 @@ struct wol_config {
598 uint8_t no_rules_in_cmd; 598 uint8_t no_rules_in_cmd;
599 uint8_t result; 599 uint8_t result;
600 struct host_wol_rule rule[MAX_WOL_RULES]; 600 struct host_wol_rule rule[MAX_WOL_RULES];
601}; 601} __attribute__ ((packed));
602
603 602
604struct cmd_ds_host_sleep { 603struct cmd_ds_host_sleep {
605 struct cmd_header hdr; 604 struct cmd_header hdr;
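Every libertas command structure above gains __attribute__ ((packed)) because these structs are exchanged with the firmware byte for byte; without the attribute the compiler is free to insert alignment padding, so sizeof() and member offsets stop matching the wire format. A stand-alone illustration with a made-up layout (not a libertas command):

#include <stdio.h>
#include <stdint.h>

struct cmd_plain {              /* compiler may pad after 'action' and at the end */
        uint8_t  action;
        uint16_t value;
        uint8_t  flags;
};

struct cmd_packed {
        uint8_t  action;
        uint16_t value;
        uint8_t  flags;
} __attribute__ ((packed));

int main(void)
{
        /* Typically prints 6 vs 4 bytes: two bytes of padding disappear when packed. */
        printf("plain  : %zu bytes\n", sizeof(struct cmd_plain));
        printf("packed : %zu bytes\n", sizeof(struct cmd_packed));
        return 0;
}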
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index d1fc305de5fe..e7289e2e7f16 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -206,7 +206,7 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
206 * there are no buffered multicast frames to send 206 * there are no buffered multicast frames to send
207 */ 207 */
208 ieee80211_stop_queues(priv->hw); 208 ieee80211_stop_queues(priv->hw);
209 return 0; 209 return NETDEV_TX_OK;
210} 210}
211 211
212static void lbtf_tx_work(struct work_struct *work) 212static void lbtf_tx_work(struct work_struct *work)
diff --git a/drivers/net/wireless/orinoco/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index bc84e2792f8a..45a04faa7818 100644
--- a/drivers/net/wireless/orinoco/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -1610,6 +1610,16 @@ static void orinoco_rx_isr_tasklet(unsigned long data)
1610 struct orinoco_rx_data *rx_data, *temp; 1610 struct orinoco_rx_data *rx_data, *temp;
1611 struct hermes_rx_descriptor *desc; 1611 struct hermes_rx_descriptor *desc;
1612 struct sk_buff *skb; 1612 struct sk_buff *skb;
1613 unsigned long flags;
1614
1615 /* orinoco_rx requires the driver lock, and we also need to
1616 * protect priv->rx_list, so just hold the lock over the
1617 * lot.
1618 *
1619 * If orinoco_lock fails, we've unplugged the card. In this
1620 * case just abort. */
1621 if (orinoco_lock(priv, &flags) != 0)
1622 return;
1613 1623
1614 /* extract desc and skb from queue */ 1624 /* extract desc and skb from queue */
1615 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) { 1625 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
@@ -1622,6 +1632,8 @@ static void orinoco_rx_isr_tasklet(unsigned long data)
1622 1632
1623 kfree(desc); 1633 kfree(desc);
1624 } 1634 }
1635
1636 orinoco_unlock(priv, &flags);
1625} 1637}
1626 1638
1627/********************************************************************/ 1639/********************************************************************/
@@ -1661,7 +1673,7 @@ static void print_linkstatus(struct net_device *dev, u16 status)
1661 s = "UNKNOWN"; 1673 s = "UNKNOWN";
1662 } 1674 }
1663 1675
1664 printk(KERN_INFO "%s: New link status: %s (%04x)\n", 1676 printk(KERN_DEBUG "%s: New link status: %s (%04x)\n",
1665 dev->name, s, status); 1677 dev->name, s, status);
1666} 1678}
1667 1679
@@ -3645,12 +3657,22 @@ struct net_device
3645void free_orinocodev(struct net_device *dev) 3657void free_orinocodev(struct net_device *dev)
3646{ 3658{
3647 struct orinoco_private *priv = netdev_priv(dev); 3659 struct orinoco_private *priv = netdev_priv(dev);
3660 struct orinoco_rx_data *rx_data, *temp;
3648 3661
3649 /* No need to empty priv->rx_list: if the tasklet is scheduled 3662 /* If the tasklet is scheduled when we call tasklet_kill it
3650 * when we call tasklet_kill it will run one final time, 3663 * will run one final time. However the tasklet will only
3651 * emptying the list */ 3664 * drain priv->rx_list if the hw is still available. */
3652 tasklet_kill(&priv->rx_tasklet); 3665 tasklet_kill(&priv->rx_tasklet);
3653 3666
3667 /* Explicitly drain priv->rx_list */
3668 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
3669 list_del(&rx_data->list);
3670
3671 dev_kfree_skb(rx_data->skb);
3672 kfree(rx_data->desc);
3673 kfree(rx_data);
3674 }
3675
3654 unregister_pm_notifier(&priv->pm_notifier); 3676 unregister_pm_notifier(&priv->pm_notifier);
3655 orinoco_uncache_fw(priv); 3677 orinoco_uncache_fw(priv);
3656 3678
@@ -5046,33 +5068,30 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5046 struct orinoco_private *priv = netdev_priv(dev); 5068 struct orinoco_private *priv = netdev_priv(dev);
5047 u8 *buf; 5069 u8 *buf;
5048 unsigned long flags; 5070 unsigned long flags;
5049 int err = 0;
5050 5071
5051 /* cut off at IEEE80211_MAX_DATA_LEN */ 5072 /* cut off at IEEE80211_MAX_DATA_LEN */
5052 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || 5073 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
5053 (wrqu->data.length && (extra == NULL))) 5074 (wrqu->data.length && (extra == NULL)))
5054 return -EINVAL; 5075 return -EINVAL;
5055 5076
5056 if (orinoco_lock(priv, &flags) != 0)
5057 return -EBUSY;
5058
5059 if (wrqu->data.length) { 5077 if (wrqu->data.length) {
5060 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 5078 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
5061 if (buf == NULL) { 5079 if (buf == NULL)
5062 err = -ENOMEM; 5080 return -ENOMEM;
5063 goto out;
5064 }
5065 5081
5066 memcpy(buf, extra, wrqu->data.length); 5082 memcpy(buf, extra, wrqu->data.length);
5067 kfree(priv->wpa_ie); 5083 } else
5068 priv->wpa_ie = buf; 5084 buf = NULL;
5069 priv->wpa_ie_len = wrqu->data.length; 5085
5070 } else { 5086 if (orinoco_lock(priv, &flags) != 0) {
5071 kfree(priv->wpa_ie); 5087 kfree(buf);
5072 priv->wpa_ie = NULL; 5088 return -EBUSY;
5073 priv->wpa_ie_len = 0;
5074 } 5089 }
5075 5090
5091 kfree(priv->wpa_ie);
5092 priv->wpa_ie = buf;
5093 priv->wpa_ie_len = wrqu->data.length;
5094
5076 if (priv->wpa_ie) { 5095 if (priv->wpa_ie) {
5077 /* Looks like wl_lkm wants to check the auth alg, and 5096 /* Looks like wl_lkm wants to check the auth alg, and
5078 * somehow pass it to the firmware. 5097 * somehow pass it to the firmware.
@@ -5081,9 +5100,8 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
5081 */ 5100 */
5082 } 5101 }
5083 5102
5084out:
5085 orinoco_unlock(priv, &flags); 5103 orinoco_unlock(priv, &flags);
5086 return err; 5104 return 0;
5087} 5105}
5088 5106
5089static int orinoco_ioctl_get_genie(struct net_device *dev, 5107static int orinoco_ioctl_get_genie(struct net_device *dev,
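The set_genie rework above moves the GFP_KERNEL allocation out from under orinoco_lock(), which takes a spinlock with interrupts disabled; a sleeping allocation is not allowed in that context. The resulting shape, allocate and copy first, then take the lock only around the pointer swap (freeing the new buffer if the lock cannot be taken), is the usual pattern. A condensed sketch under those assumptions, with hypothetical structures (not the orinoco ones):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_priv {
        spinlock_t lock;
        u8 *ie;
        size_t ie_len;
};

static int example_set_ie(struct example_priv *priv, const u8 *src, size_t len)
{
        unsigned long flags;
        u8 *buf = NULL;

        if (len) {
                buf = kmalloc(len, GFP_KERNEL); /* may sleep: do it before locking */
                if (!buf)
                        return -ENOMEM;
                memcpy(buf, src, len);
        }

        spin_lock_irqsave(&priv->lock, flags);
        kfree(priv->ie);                        /* safe on NULL */
        priv->ie = buf;
        priv->ie_len = len;
        spin_unlock_irqrestore(&priv->lock, flags);
        return 0;
}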
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index f127602670ec..0b32215d3f5d 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -435,6 +435,7 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
435 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */ 435 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
436 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ 436 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
437 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */ 437 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
438 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
438 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */ 439 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
439 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */ 440 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
440 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */ 441 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 82354b974a04..34561e6e816b 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -138,6 +138,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
138 u8 *fw_version = NULL; 138 u8 *fw_version = NULL;
139 size_t len; 139 size_t len;
140 int i; 140 int i;
141 int maxlen;
141 142
142 if (priv->rx_start) 143 if (priv->rx_start)
143 return 0; 144 return 0;
@@ -195,6 +196,16 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
195 else 196 else
196 priv->rx_mtu = (size_t) 197 priv->rx_mtu = (size_t)
197 0x620 - priv->tx_hdr_len; 198 0x620 - priv->tx_hdr_len;
199 maxlen = priv->tx_hdr_len + /* USB devices */
200 sizeof(struct p54_rx_data) +
201 4 + /* rx alignment */
202 IEEE80211_MAX_FRAG_THRESHOLD;
203 if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) {
204 printk(KERN_INFO "p54: rx_mtu reduced from %d "
205 "to %d\n", priv->rx_mtu,
206 maxlen);
207 priv->rx_mtu = maxlen;
208 }
198 break; 209 break;
199 } 210 }
200 case BR_CODE_EXPOSED_IF: 211 case BR_CODE_EXPOSED_IF:
@@ -440,8 +451,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
440 } 451 }
441 if (err) 452 if (err)
442 goto err; 453 goto err;
443 454 }
444 } 455 break;
445 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 456 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
446 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 457 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
447 if (!priv->iq_autocal) { 458 if (!priv->iq_autocal) {
@@ -575,6 +586,7 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
575 u16 freq = le16_to_cpu(hdr->freq); 586 u16 freq = le16_to_cpu(hdr->freq);
576 size_t header_len = sizeof(*hdr); 587 size_t header_len = sizeof(*hdr);
577 u32 tsf32; 588 u32 tsf32;
589 u8 rate = hdr->rate & 0xf;
578 590
579 /* 591 /*
580 * If the device is in a unspecified state we have to 592 * If the device is in a unspecified state we have to
@@ -603,8 +615,11 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
603 rx_status.qual = (100 * hdr->rssi) / 127; 615 rx_status.qual = (100 * hdr->rssi) / 127;
604 if (hdr->rate & 0x10) 616 if (hdr->rate & 0x10)
605 rx_status.flag |= RX_FLAG_SHORTPRE; 617 rx_status.flag |= RX_FLAG_SHORTPRE;
606 rx_status.rate_idx = (dev->conf.channel->band == IEEE80211_BAND_2GHZ ? 618 if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
607 hdr->rate : (hdr->rate - 4)) & 0xf; 619 rx_status.rate_idx = (rate < 4) ? 0 : rate - 4;
620 else
621 rx_status.rate_idx = rate;
622
608 rx_status.freq = freq; 623 rx_status.freq = freq;
609 rx_status.band = dev->conf.channel->band; 624 rx_status.band = dev->conf.channel->band;
610 rx_status.antenna = hdr->antenna; 625 rx_status.antenna = hdr->antenna;
@@ -730,7 +745,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
730 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 745 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
731 struct p54_hdr *entry_hdr; 746 struct p54_hdr *entry_hdr;
732 struct p54_tx_data *entry_data; 747 struct p54_tx_data *entry_data;
733 int pad = 0; 748 unsigned int pad = 0, frame_len;
734 749
735 range = (void *)info->rate_driver_data; 750 range = (void *)info->rate_driver_data;
736 if (range->start_addr != addr) { 751 if (range->start_addr != addr) {
@@ -753,6 +768,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
753 __skb_unlink(entry, &priv->tx_queue); 768 __skb_unlink(entry, &priv->tx_queue);
754 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 769 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
755 770
771 frame_len = entry->len;
756 entry_hdr = (struct p54_hdr *) entry->data; 772 entry_hdr = (struct p54_hdr *) entry->data;
757 entry_data = (struct p54_tx_data *) entry_hdr->data; 773 entry_data = (struct p54_tx_data *) entry_hdr->data;
758 priv->tx_stats[entry_data->hw_queue].len--; 774 priv->tx_stats[entry_data->hw_queue].len--;
@@ -798,6 +814,29 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
798 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 814 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
799 info->status.ack_signal = p54_rssi_to_dbm(dev, 815 info->status.ack_signal = p54_rssi_to_dbm(dev,
800 (int)payload->ack_rssi); 816 (int)payload->ack_rssi);
817
818 /* Undo all changes to the frame. */
819 switch (entry_data->key_type) {
820 case P54_CRYPTO_TKIPMICHAEL: {
821 u8 *iv = (u8 *)(entry_data->align + pad +
822 entry_data->crypt_offset);
823
824 /* Restore the original TKIP IV. */
825 iv[2] = iv[0];
826 iv[0] = iv[1];
827 iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */
828
829 frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */
830 break;
831 }
832 case P54_CRYPTO_AESCCMP:
833 frame_len -= 8; /* remove CCMP_MIC */
834 break;
835 case P54_CRYPTO_WEP:
836 frame_len -= 4; /* remove WEP_ICV */
837 break;
838 }
839 skb_trim(entry, frame_len);
801 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 840 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
802 ieee80211_tx_status_irqsafe(dev, entry); 841 ieee80211_tx_status_irqsafe(dev, entry);
803 goto out; 842 goto out;
@@ -1122,7 +1161,7 @@ static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
1122 1161
1123 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, 1162 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET,
1124 sizeof(struct p54_hdr) + sizeof(*tim), 1163 sizeof(struct p54_hdr) + sizeof(*tim),
1125 P54_CONTROL_TYPE_TIM, GFP_KERNEL); 1164 P54_CONTROL_TYPE_TIM, GFP_ATOMIC);
1126 if (!skb) 1165 if (!skb)
1127 return -ENOMEM; 1166 return -ENOMEM;
1128 1167
@@ -1383,7 +1422,6 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1383 hdr->tries = ridx; 1422 hdr->tries = ridx;
1384 txhdr->rts_rate_idx = 0; 1423 txhdr->rts_rate_idx = 0;
1385 if (info->control.hw_key) { 1424 if (info->control.hw_key) {
1386 crypt_offset += info->control.hw_key->iv_len;
1387 txhdr->key_type = p54_convert_algo(info->control.hw_key->alg); 1425 txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
1388 txhdr->key_len = min((u8)16, info->control.hw_key->keylen); 1426 txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
1389 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); 1427 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
@@ -1397,6 +1435,8 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1397 } 1435 }
1398 /* reserve some space for ICV */ 1436 /* reserve some space for ICV */
1399 len += info->control.hw_key->icv_len; 1437 len += info->control.hw_key->icv_len;
1438 memset(skb_put(skb, info->control.hw_key->icv_len), 0,
1439 info->control.hw_key->icv_len);
1400 } else { 1440 } else {
1401 txhdr->key_type = 0; 1441 txhdr->key_type = 0;
1402 txhdr->key_len = 0; 1442 txhdr->key_len = 0;
@@ -1584,7 +1624,7 @@ static int p54_scan(struct ieee80211_hw *dev, u16 mode, u16 dwell)
1584 1624
1585 err: 1625 err:
1586 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); 1626 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy));
1587 kfree_skb(skb); 1627 p54_free_skb(dev, skb);
1588 return -EINVAL; 1628 return -EINVAL;
1589} 1629}
1590 1630
@@ -1824,7 +1864,7 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
1824 1864
1825static int p54_config(struct ieee80211_hw *dev, u32 changed) 1865static int p54_config(struct ieee80211_hw *dev, u32 changed)
1826{ 1866{
1827 int ret; 1867 int ret = 0;
1828 struct p54_common *priv = dev->priv; 1868 struct p54_common *priv = dev->priv;
1829 struct ieee80211_conf *conf = &dev->conf; 1869 struct ieee80211_conf *conf = &dev->conf;
1830 1870
@@ -2051,7 +2091,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2051 algo = P54_CRYPTO_AESCCMP; 2091 algo = P54_CRYPTO_AESCCMP;
2052 break; 2092 break;
2053 default: 2093 default:
2054 return -EINVAL; 2094 return -EOPNOTSUPP;
2055 } 2095 }
2056 } 2096 }
2057 2097
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index c44a200059d2..5de2ebfb28c7 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
56 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ 56 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
57 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ 57 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
58 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ 58 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
59 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
59 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ 60 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
60 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ 61 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
61 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ 62 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
@@ -143,11 +144,8 @@ static void p54u_tx_cb(struct urb *urb)
143 struct sk_buff *skb = urb->context; 144 struct sk_buff *skb = urb->context;
144 struct ieee80211_hw *dev = (struct ieee80211_hw *) 145 struct ieee80211_hw *dev = (struct ieee80211_hw *)
145 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 146 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
146 struct p54u_priv *priv = dev->priv;
147 147
148 skb_pull(skb, priv->common.tx_hdr_len); 148 p54_free_skb(dev, skb);
149 if (FREE_AFTER_TX(skb))
150 p54_free_skb(dev, skb);
151} 149}
152 150
153static void p54u_tx_dummy_cb(struct urb *urb) { } 151static void p54u_tx_dummy_cb(struct urb *urb) { }
@@ -229,7 +227,10 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
229 p54u_tx_dummy_cb, dev); 227 p54u_tx_dummy_cb, dev);
230 usb_fill_bulk_urb(data_urb, priv->udev, 228 usb_fill_bulk_urb(data_urb, priv->udev,
231 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 229 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
232 skb->data, skb->len, p54u_tx_cb, skb); 230 skb->data, skb->len, FREE_AFTER_TX(skb) ?
231 p54u_tx_cb : p54u_tx_dummy_cb, skb);
232 addr_urb->transfer_flags |= URB_ZERO_PACKET;
233 data_urb->transfer_flags |= URB_ZERO_PACKET;
233 234
234 usb_anchor_urb(addr_urb, &priv->submitted); 235 usb_anchor_urb(addr_urb, &priv->submitted);
235 err = usb_submit_urb(addr_urb, GFP_ATOMIC); 236 err = usb_submit_urb(addr_urb, GFP_ATOMIC);
@@ -238,7 +239,7 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
238 goto out; 239 goto out;
239 } 240 }
240 241
241 usb_anchor_urb(addr_urb, &priv->submitted); 242 usb_anchor_urb(data_urb, &priv->submitted);
242 err = usb_submit_urb(data_urb, GFP_ATOMIC); 243 err = usb_submit_urb(data_urb, GFP_ATOMIC);
243 if (err) 244 if (err)
244 usb_unanchor_urb(data_urb); 245 usb_unanchor_urb(data_urb);
@@ -268,27 +269,24 @@ static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
268{ 269{
269 struct p54u_priv *priv = dev->priv; 270 struct p54u_priv *priv = dev->priv;
270 struct urb *data_urb; 271 struct urb *data_urb;
271 struct lm87_tx_hdr *hdr; 272 struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
272 __le32 checksum;
273 __le32 addr = ((struct p54_hdr *)skb->data)->req_id;
274 273
275 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 274 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
276 if (!data_urb) 275 if (!data_urb)
277 return; 276 return;
278 277
279 checksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); 278 hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len);
280 hdr = (struct lm87_tx_hdr *)skb_push(skb, sizeof(*hdr)); 279 hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id;
281 hdr->chksum = checksum;
282 hdr->device_addr = addr;
283 280
284 usb_fill_bulk_urb(data_urb, priv->udev, 281 usb_fill_bulk_urb(data_urb, priv->udev,
285 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 282 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
286 skb->data, skb->len, p54u_tx_cb, skb); 283 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
284 p54u_tx_cb : p54u_tx_dummy_cb, skb);
285 data_urb->transfer_flags |= URB_ZERO_PACKET;
287 286
288 usb_anchor_urb(data_urb, &priv->submitted); 287 usb_anchor_urb(data_urb, &priv->submitted);
289 if (usb_submit_urb(data_urb, GFP_ATOMIC)) { 288 if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
290 usb_unanchor_urb(data_urb); 289 usb_unanchor_urb(data_urb);
291 skb_pull(skb, sizeof(*hdr));
292 p54_free_skb(dev, skb); 290 p54_free_skb(dev, skb);
293 } 291 }
294 usb_free_urb(data_urb); 292 usb_free_urb(data_urb);
@@ -298,11 +296,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
298{ 296{
299 struct p54u_priv *priv = dev->priv; 297 struct p54u_priv *priv = dev->priv;
300 struct urb *int_urb, *data_urb; 298 struct urb *int_urb, *data_urb;
301 struct net2280_tx_hdr *hdr; 299 struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr);
302 struct net2280_reg_write *reg; 300 struct net2280_reg_write *reg;
303 int err = 0; 301 int err = 0;
304 __le32 addr = ((struct p54_hdr *) skb->data)->req_id;
305 __le16 len = cpu_to_le16(skb->len);
306 302
307 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 303 reg = kmalloc(sizeof(*reg), GFP_ATOMIC);
308 if (!reg) 304 if (!reg)
@@ -325,10 +321,9 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
325 reg->addr = cpu_to_le32(P54U_DEV_BASE); 321 reg->addr = cpu_to_le32(P54U_DEV_BASE);
326 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 322 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
327 323
328 hdr = (void *)skb_push(skb, sizeof(*hdr));
329 memset(hdr, 0, sizeof(*hdr)); 324 memset(hdr, 0, sizeof(*hdr));
330 hdr->len = len; 325 hdr->len = cpu_to_le16(skb->len);
331 hdr->device_addr = addr; 326 hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id;
332 327
333 usb_fill_bulk_urb(int_urb, priv->udev, 328 usb_fill_bulk_urb(int_urb, priv->udev,
334 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), 329 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg),
@@ -339,11 +334,13 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
339 * free what's inside the transfer_buffer after the callback routine 334 * free what's inside the transfer_buffer after the callback routine
340 * has completed. 335 * has completed.
341 */ 336 */
342 int_urb->transfer_flags |= URB_FREE_BUFFER; 337 int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET;
343 338
344 usb_fill_bulk_urb(data_urb, priv->udev, 339 usb_fill_bulk_urb(data_urb, priv->udev,
345 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), 340 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
346 skb->data, skb->len, p54u_tx_cb, skb); 341 hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ?
342 p54u_tx_cb : p54u_tx_dummy_cb, skb);
343 data_urb->transfer_flags |= URB_ZERO_PACKET;
347 344
348 usb_anchor_urb(int_urb, &priv->submitted); 345 usb_anchor_urb(int_urb, &priv->submitted);
349 err = usb_submit_urb(int_urb, GFP_ATOMIC); 346 err = usb_submit_urb(int_urb, GFP_ATOMIC);
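The three p54usb TX hunks above make the same change: the completion routine for each data URB is now picked per-skb, so only buffers flagged FREE_AFTER_TX() reach p54u_tx_cb() (which simply calls p54_free_skb()), and every bulk URB is marked URB_ZERO_PACKET. A minimal sketch of that selection, relying only on symbols visible in the hunks; the helper name p54u_fill_tx_urb() is purely illustrative and not part of the patch:

static void p54u_fill_tx_urb(struct p54u_priv *priv, struct urb *urb,
			     struct sk_buff *skb, void *buf, size_t len)
{
	/* only frames that must be released after TX get the real handler */
	usb_complete_t cb = FREE_AFTER_TX(skb) ? p54u_tx_cb : p54u_tx_dummy_cb;

	usb_fill_bulk_urb(urb, priv->udev,
			  usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
			  buf, len, cb, skb);
	/* send a zero-length packet when the transfer is an exact
	 * multiple of the endpoint's max packet size */
	urb->transfer_flags |= URB_ZERO_PACKET;
}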
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 607ce9f61b54..ed93ac41297f 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1649,9 +1649,7 @@ static char *rndis_translate_scan(struct net_device *dev,
1649 char *end_buf, 1649 char *end_buf,
1650 struct ndis_80211_bssid_ex *bssid) 1650 struct ndis_80211_bssid_ex *bssid)
1651{ 1651{
1652#ifdef DEBUG
1653 struct usbnet *usbdev = netdev_priv(dev); 1652 struct usbnet *usbdev = netdev_priv(dev);
1654#endif
1655 u8 *ie; 1653 u8 *ie;
1656 char *current_val; 1654 char *current_val;
1657 int bssid_len, ie_len, i; 1655 int bssid_len, ie_len, i;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 30028e2422fc..af6b5847be5c 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -38,7 +38,7 @@
38/* 38/*
39 * Allow hardware encryption to be disabled. 39 * Allow hardware encryption to be disabled.
40 */ 40 */
41static int modparam_nohwcrypt = 1; 41static int modparam_nohwcrypt = 0;
42module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 42module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
43MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 43MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
44 44
@@ -376,11 +376,11 @@ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
376 376
377 /* 377 /*
378 * The driver does not support the IV/EIV generation 378 * The driver does not support the IV/EIV generation
379 * in hardware. However it doesn't support the IV/EIV 379 * in hardware. However it demands the data to be provided
380 * inside the ieee80211 frame either, but requires it 380 * both seperately as well as inside the frame.
381 * to be provided seperately for the descriptor. 381 * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib
382 * rt2x00lib will cut the IV/EIV data out of all frames 382 * to ensure rt2x00lib will not strip the data from the
383 * given to us by mac80211, but we must tell mac80211 383 * frame after the copy, now we must tell mac80211
384 * to generate the IV/EIV data. 384 * to generate the IV/EIV data.
385 */ 385 */
386 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 386 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@@ -1181,7 +1181,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1181 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1181 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1182 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1182 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1183 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1183 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1184 rt2x00_set_field32(&word, TXD_W0_CIPHER, txdesc->cipher); 1184 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
1185 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); 1185 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
1186 rt2x00_desc_write(txd, 0, word); 1186 rt2x00_desc_write(txd, 0, word);
1187} 1187}
@@ -1334,14 +1334,7 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1334 1334
1335 /* ICV is located at the end of frame */ 1335 /* ICV is located at the end of frame */
1336 1336
1337 /* 1337 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1338 * Hardware has stripped IV/EIV data from 802.11 frame during
1339 * decryption. It has provided the data seperately but rt2x00lib
1340 * should decide if it should be reinserted.
1341 */
1342 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1343 if (rxdesc->cipher != CIPHER_TKIP)
1344 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1345 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) 1338 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1346 rxdesc->flags |= RX_FLAG_DECRYPTED; 1339 rxdesc->flags |= RX_FLAG_DECRYPTED;
1347 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) 1340 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 6d92542fcf0d..87c0f2c83077 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -807,13 +807,11 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
807{ 807{
808 entry->flags = 0; 808 entry->flags = 0;
809 entry->bitrate = rate->bitrate; 809 entry->bitrate = rate->bitrate;
810 entry->hw_value = rt2x00_create_rate_hw_value(index, 0); 810 entry->hw_value =index;
811 entry->hw_value_short = entry->hw_value; 811 entry->hw_value_short = index;
812 812
813 if (rate->flags & DEV_RATE_SHORT_PREAMBLE) { 813 if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
814 entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; 814 entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
815 entry->hw_value_short |= rt2x00_create_rate_hw_value(index, 1);
816 }
817} 815}
818 816
819static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, 817static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 68f4e0fc35b9..a0cd35b6beb5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -97,7 +97,7 @@ void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
97 97
98void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) 98void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
99{ 99{
100 if (rt2x00dev->led_radio.type == LED_TYPE_ASSOC) 100 if (rt2x00dev->led_radio.type == LED_TYPE_RADIO)
101 rt2x00led_led_simple(&rt2x00dev->led_radio, enabled); 101 rt2x00led_led_simple(&rt2x00dev->led_radio, enabled);
102} 102}
103 103
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 03024327767b..86cd26fbf769 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -52,22 +52,11 @@ struct rt2x00_rate {
52 52
53extern const struct rt2x00_rate rt2x00_supported_rates[12]; 53extern const struct rt2x00_rate rt2x00_supported_rates[12];
54 54
55static inline u16 rt2x00_create_rate_hw_value(const u16 index,
56 const u16 short_preamble)
57{
58 return (short_preamble << 8) | (index & 0xff);
59}
60
61static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value) 55static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
62{ 56{
63 return &rt2x00_supported_rates[hw_value & 0xff]; 57 return &rt2x00_supported_rates[hw_value & 0xff];
64} 58}
65 59
66static inline int rt2x00_get_rate_preamble(const u16 hw_value)
67{
68 return (hw_value & 0xff00);
69}
70
71/* 60/*
72 * Radio control handlers. 61 * Radio control handlers.
73 */ 62 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index eaec6bd93ed5..0709decec9c2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -154,6 +154,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 154 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 155 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 156 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
157 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
157 struct ieee80211_rate *rate = 158 struct ieee80211_rate *rate =
158 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 159 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
159 const struct rt2x00_rate *hwrate; 160 const struct rt2x00_rate *hwrate;
@@ -313,7 +314,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
313 * When preamble is enabled we should set the 314 * When preamble is enabled we should set the
314 * preamble bit for the signal. 315 * preamble bit for the signal.
315 */ 316 */
316 if (rt2x00_get_rate_preamble(rate->hw_value)) 317 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
317 txdesc->signal |= 0x08; 318 txdesc->signal |= 0x08;
318 } 319 }
319} 320}
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index c3f53a92180a..3298cae1e12d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -162,7 +162,7 @@ void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
162 162
163void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 163void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
164{ 164{
165 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->flags)) 165 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
166 return; 166 return;
167 167
168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 168 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 83df312ac56f..0b29d767a258 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -434,11 +434,11 @@ static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
434 434
435 if (usb_endpoint_is_bulk_in(ep_desc)) { 435 if (usb_endpoint_is_bulk_in(ep_desc)) {
436 rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc); 436 rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
437 } else if (usb_endpoint_is_bulk_out(ep_desc)) { 437 } else if (usb_endpoint_is_bulk_out(ep_desc) &&
438 (queue != queue_end(rt2x00dev))) {
438 rt2x00usb_assign_endpoint(queue, ep_desc); 439 rt2x00usb_assign_endpoint(queue, ep_desc);
440 queue = queue_next(queue);
439 441
440 if (queue != queue_end(rt2x00dev))
441 queue = queue_next(queue);
442 tx_ep_desc = ep_desc; 442 tx_ep_desc = ep_desc;
443 } 443 }
444 } 444 }
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index d638a8a59370..96a8d69f8790 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2321,6 +2321,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2321 /* Linksys */ 2321 /* Linksys */
2322 { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) }, 2322 { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
2323 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, 2323 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
2324 { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
2324 /* MSI */ 2325 /* MSI */
2325 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, 2326 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
2326 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, 2327 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 5f887fb137a9..387c133ec0f2 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -897,6 +897,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
897 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 897 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
898 IEEE80211_HW_RX_INCLUDES_FCS | 898 IEEE80211_HW_RX_INCLUDES_FCS |
899 IEEE80211_HW_SIGNAL_UNSPEC; 899 IEEE80211_HW_SIGNAL_UNSPEC;
900 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
900 dev->queues = 1; 901 dev->queues = 1;
901 dev->max_signal = 65; 902 dev->max_signal = 65;
902 903
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 00ce3ef39abe..22bc07ef2f37 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -213,7 +213,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
213 urb = usb_alloc_urb(0, GFP_ATOMIC); 213 urb = usb_alloc_urb(0, GFP_ATOMIC);
214 if (!urb) { 214 if (!urb) {
215 kfree_skb(skb); 215 kfree_skb(skb);
216 return -ENOMEM; 216 return NETDEV_TX_OK;
217 } 217 }
218 218
219 flags = skb->len; 219 flags = skb->len;
@@ -273,6 +273,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
273 273
274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep),
275 buf, skb->len, rtl8187_tx_cb, skb); 275 buf, skb->len, rtl8187_tx_cb, skb);
276 urb->transfer_flags |= URB_ZERO_PACKET;
276 usb_anchor_urb(urb, &priv->anchored); 277 usb_anchor_urb(urb, &priv->anchored);
277 rc = usb_submit_urb(urb, GFP_ATOMIC); 278 rc = usb_submit_urb(urb, GFP_ATOMIC);
278 if (rc < 0) { 279 if (rc < 0) {
@@ -281,7 +282,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
281 } 282 }
282 usb_free_urb(urb); 283 usb_free_urb(urb);
283 284
284 return rc; 285 return NETDEV_TX_OK;
285} 286}
286 287
287static void rtl8187_rx_cb(struct urb *urb) 288static void rtl8187_rx_cb(struct urb *urb)
@@ -1471,6 +1472,7 @@ static void __devexit rtl8187_disconnect(struct usb_interface *intf)
1471 ieee80211_unregister_hw(dev); 1472 ieee80211_unregister_hw(dev);
1472 1473
1473 priv = dev->priv; 1474 priv = dev->priv;
1475 usb_reset_device(priv->udev);
1474 usb_put_dev(interface_to_usbdev(intf)); 1476 usb_put_dev(interface_to_usbdev(intf));
1475 ieee80211_free_hw(dev); 1477 ieee80211_free_hw(dev);
1476} 1478}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index b5db57d2fcf5..17527f765b39 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -84,6 +84,7 @@ static struct usb_device_id usb_ids[] = {
84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B }, 84 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B }, 85 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B }, 86 { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
87 { USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
87 /* "Driverless" devices that need ejecting */ 88 /* "Driverless" devices that need ejecting */
88 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 89 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
89 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 90 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 2e03b6d796d3..e76d715e4342 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -393,16 +393,21 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
393 return; 393 return;
394 394
395fail: 395fail:
396 entry->event = NULL;
396 cpu_buf->sample_lost_overflow++; 397 cpu_buf->sample_lost_overflow++;
397} 398}
398 399
399int oprofile_add_data(struct op_entry *entry, unsigned long val) 400int oprofile_add_data(struct op_entry *entry, unsigned long val)
400{ 401{
402 if (!entry->event)
403 return 0;
401 return op_cpu_buffer_add_data(entry, val); 404 return op_cpu_buffer_add_data(entry, val);
402} 405}
403 406
404int oprofile_write_commit(struct op_entry *entry) 407int oprofile_write_commit(struct op_entry *entry)
405{ 408{
409 if (!entry->event)
410 return -EINVAL;
406 return op_cpu_buffer_write_commit(entry); 411 return op_cpu_buffer_write_commit(entry);
407} 412}
408 413
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 63f81c44846a..272995d20293 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -66,6 +66,13 @@ static inline void op_cpu_buffer_reset(int cpu)
66 cpu_buf->last_task = NULL; 66 cpu_buf->last_task = NULL;
67} 67}
68 68
69/*
70 * op_cpu_buffer_add_data() and op_cpu_buffer_write_commit() may be
71 * called only if op_cpu_buffer_write_reserve() did not return NULL or
72 * entry->event != NULL, otherwise entry->size or entry->event will be
73 * used uninitialized.
74 */
75
69struct op_sample 76struct op_sample
70*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size); 77*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size);
71int op_cpu_buffer_write_commit(struct op_entry *entry); 78int op_cpu_buffer_write_commit(struct op_entry *entry);
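The comment added to cpu_buffer.h documents the contract that the two new entry->event checks in cpu_buffer.c enforce. A hedged sketch, not taken from the patch, of how a caller chains the calls once oprofile_write_reserve() has run; the helper name and the reading of a zero return from oprofile_add_data() as "nothing reserved or no room" are assumptions:

static void sketch_add_and_commit(struct op_entry *entry, unsigned long val)
{
	/*
	 * With the new entry->event checks, calling into a failed
	 * reservation is a harmless no-op instead of a use of
	 * uninitialized entry state.
	 */
	if (!oprofile_add_data(entry, val))
		return;		/* no reservation, or no room for the value */

	oprofile_write_commit(entry);	/* returns -EINVAL on a dead entry */
}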
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index f09b1010d477..803d9ddd6e75 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -266,6 +266,8 @@ static int detect_ejectable_slots(struct pci_bus *pbus)
266 int found = acpi_pci_detect_ejectable(pbus); 266 int found = acpi_pci_detect_ejectable(pbus);
267 if (!found) { 267 if (!found) {
268 acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus); 268 acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
269 if (!bridge_handle)
270 return 0;
269 acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1, 271 acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
270 is_pci_dock_device, (void *)&found, NULL); 272 is_pci_dock_device, (void *)&found, NULL);
271 } 273 }
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5482d4ed8256..c2485542f543 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -126,8 +126,10 @@ static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
126 mutex_lock(&slot->ctrl->crit_sect); 126 mutex_lock(&slot->ctrl->crit_sect);
127 127
128 /* has it been >1 sec since our last toggle? */ 128 /* has it been >1 sec since our last toggle? */
129 if ((get_seconds() - slot->last_emi_toggle) < 1) 129 if ((get_seconds() - slot->last_emi_toggle) < 1) {
130 mutex_unlock(&slot->ctrl->crit_sect);
130 return -EINVAL; 131 return -EINVAL;
132 }
131 133
132 /* see what our current state is */ 134 /* see what our current state is */
133 retval = get_lock_status(hotplug_slot, &value); 135 retval = get_lock_status(hotplug_slot, &value);
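The pciehp fix above plugs a lock leak: the rate-limit early return was taken with ctrl->crit_sect still held. A hedged sketch, not from the patch, of the single-exit idiom that avoids having to duplicate the unlock on each return path; the helper name is illustrative and the real work is elided:

static int sketch_set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
{
	struct slot *slot = hotplug_slot->private;
	int retval = -EINVAL;

	mutex_lock(&slot->ctrl->crit_sect);

	/* has it been >1 sec since our last toggle? */
	if ((get_seconds() - slot->last_emi_toggle) < 1)
		goto out;

	retval = 0;		/* ... toggle the EMI lock here ... */
out:
	mutex_unlock(&slot->ctrl->crit_sect);
	return retval;
}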
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index b4a90badd0a6..896a15d70f5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -398,21 +398,19 @@ static int msi_capability_init(struct pci_dev *dev)
398 entry->msi_attrib.masked = 1; 398 entry->msi_attrib.masked = 1;
399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 399 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
400 entry->msi_attrib.pos = pos; 400 entry->msi_attrib.pos = pos;
401 if (entry->msi_attrib.maskbit) {
402 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
403 entry->msi_attrib.is_64);
404 }
405 entry->dev = dev; 401 entry->dev = dev;
406 if (entry->msi_attrib.maskbit) { 402 if (entry->msi_attrib.maskbit) {
407 unsigned int maskbits, temp; 403 unsigned int base, maskbits, temp;
404
405 base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
406 entry->mask_base = (void __iomem *)(long)base;
407
408 /* All MSIs are unmasked by default, Mask them all */ 408 /* All MSIs are unmasked by default, Mask them all */
409 pci_read_config_dword(dev, 409 pci_read_config_dword(dev, base, &maskbits);
410 msi_mask_bits_reg(pos, entry->msi_attrib.is_64),
411 &maskbits);
412 temp = (1 << multi_msi_capable(control)); 410 temp = (1 << multi_msi_capable(control));
413 temp = ((temp - 1) & ~temp); 411 temp = ((temp - 1) & ~temp);
414 maskbits |= temp; 412 maskbits |= temp;
415 pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); 413 pci_write_config_dword(dev, base, maskbits);
416 entry->msi_attrib.maskbits_mask = temp; 414 entry->msi_attrib.maskbits_mask = temp;
417 } 415 }
418 list_add_tail(&entry->list, &dev->msi_list); 416 list_add_tail(&entry->list, &dev->msi_list);
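A worked example (not part of the patch) of the mask computation kept in msi_capability_init() above, assuming multi_msi_capable() yields the number of vectors the function advertises:

	/*
	 *   vectors = 4
	 *   temp    = 1 << 4            = 0b10000
	 *   temp    = (temp - 1) & ~temp = 0b01111
	 *
	 * i.e. one mask bit per advertised vector, OR-ed into the mask
	 * register read back from 'base' so every MSI starts out masked.
	 */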
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c697f2680856..9de07b75b993 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -355,17 +355,27 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
355 int i = 0; 355 int i = 0;
356 356
357 if (drv && drv->suspend) { 357 if (drv && drv->suspend) {
358 pci_dev->state_saved = false;
359
358 i = drv->suspend(pci_dev, state); 360 i = drv->suspend(pci_dev, state);
359 suspend_report_result(drv->suspend, i); 361 suspend_report_result(drv->suspend, i);
360 } else { 362 if (i)
361 pci_save_state(pci_dev); 363 return i;
362 /* 364
363 * This is for compatibility with existing code with legacy PM 365 if (pci_dev->state_saved)
364 * support. 366 goto Fixup;
365 */ 367
366 pci_pm_set_unknown_state(pci_dev); 368 if (WARN_ON_ONCE(pci_dev->current_state != PCI_D0))
369 goto Fixup;
367 } 370 }
368 371
372 pci_save_state(pci_dev);
373 /*
374 * This is for compatibility with existing code with legacy PM support.
375 */
376 pci_pm_set_unknown_state(pci_dev);
377
378 Fixup:
369 pci_fixup_device(pci_fixup_suspend, pci_dev); 379 pci_fixup_device(pci_fixup_suspend, pci_dev);
370 380
371 return i; 381 return i;
@@ -386,81 +396,34 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
386 396
387static int pci_legacy_resume_early(struct device *dev) 397static int pci_legacy_resume_early(struct device *dev)
388{ 398{
389 int error = 0;
390 struct pci_dev * pci_dev = to_pci_dev(dev); 399 struct pci_dev * pci_dev = to_pci_dev(dev);
391 struct pci_driver * drv = pci_dev->driver; 400 struct pci_driver * drv = pci_dev->driver;
392 401
393 pci_fixup_device(pci_fixup_resume_early, pci_dev); 402 return drv && drv->resume_early ?
394 403 drv->resume_early(pci_dev) : 0;
395 if (drv && drv->resume_early)
396 error = drv->resume_early(pci_dev);
397 return error;
398} 404}
399 405
400static int pci_legacy_resume(struct device *dev) 406static int pci_legacy_resume(struct device *dev)
401{ 407{
402 int error;
403 struct pci_dev * pci_dev = to_pci_dev(dev); 408 struct pci_dev * pci_dev = to_pci_dev(dev);
404 struct pci_driver * drv = pci_dev->driver; 409 struct pci_driver * drv = pci_dev->driver;
405 410
406 pci_fixup_device(pci_fixup_resume, pci_dev); 411 pci_fixup_device(pci_fixup_resume, pci_dev);
407 412
408 if (drv && drv->resume) { 413 return drv && drv->resume ?
409 error = drv->resume(pci_dev); 414 drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
410 } else {
411 /* restore the PCI config space */
412 pci_restore_state(pci_dev);
413 error = pci_pm_reenable_device(pci_dev);
414 }
415 return error;
416} 415}
417 416
418/* Auxiliary functions used by the new power management framework */ 417/* Auxiliary functions used by the new power management framework */
419 418
420static int pci_restore_standard_config(struct pci_dev *pci_dev)
421{
422 struct pci_dev *parent = pci_dev->bus->self;
423 int error = 0;
424
425 /* Check if the device's bus is operational */
426 if (!parent || parent->current_state == PCI_D0) {
427 pci_restore_state(pci_dev);
428 pci_update_current_state(pci_dev, PCI_D0);
429 } else {
430 dev_warn(&pci_dev->dev, "unable to restore config, "
431 "bridge %s in low power state D%d\n", pci_name(parent),
432 parent->current_state);
433 pci_dev->current_state = PCI_UNKNOWN;
434 error = -EAGAIN;
435 }
436
437 return error;
438}
439
440static bool pci_is_bridge(struct pci_dev *pci_dev)
441{
442 return !!(pci_dev->subordinate);
443}
444
445static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 419static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
446{ 420{
447 if (pci_restore_standard_config(pci_dev)) 421 pci_restore_standard_config(pci_dev);
448 pci_fixup_device(pci_fixup_resume_early, pci_dev); 422 pci_fixup_device(pci_fixup_resume_early, pci_dev);
449} 423}
450 424
451static int pci_pm_default_resume(struct pci_dev *pci_dev) 425static int pci_pm_default_resume(struct pci_dev *pci_dev)
452{ 426{
453 /*
454 * pci_restore_standard_config() should have been called once already,
455 * but it would have failed if the device's parent bridge had not been
456 * in power state D0 at that time. Check it and try again if necessary.
457 */
458 if (pci_dev->current_state == PCI_UNKNOWN) {
459 int error = pci_restore_standard_config(pci_dev);
460 if (error)
461 return error;
462 }
463
464 pci_fixup_device(pci_fixup_resume, pci_dev); 427 pci_fixup_device(pci_fixup_resume, pci_dev);
465 428
466 if (!pci_is_bridge(pci_dev)) 429 if (!pci_is_bridge(pci_dev))
@@ -575,11 +538,11 @@ static int pci_pm_resume_noirq(struct device *dev)
575 struct device_driver *drv = dev->driver; 538 struct device_driver *drv = dev->driver;
576 int error = 0; 539 int error = 0;
577 540
541 pci_pm_default_resume_noirq(pci_dev);
542
578 if (pci_has_legacy_pm_support(pci_dev)) 543 if (pci_has_legacy_pm_support(pci_dev))
579 return pci_legacy_resume_early(dev); 544 return pci_legacy_resume_early(dev);
580 545
581 pci_pm_default_resume_noirq(pci_dev);
582
583 if (drv && drv->pm && drv->pm->resume_noirq) 546 if (drv && drv->pm && drv->pm->resume_noirq)
584 error = drv->pm->resume_noirq(dev); 547 error = drv->pm->resume_noirq(dev);
585 548
@@ -730,11 +693,11 @@ static int pci_pm_restore_noirq(struct device *dev)
730 struct device_driver *drv = dev->driver; 693 struct device_driver *drv = dev->driver;
731 int error = 0; 694 int error = 0;
732 695
696 pci_pm_default_resume_noirq(pci_dev);
697
733 if (pci_has_legacy_pm_support(pci_dev)) 698 if (pci_has_legacy_pm_support(pci_dev))
734 return pci_legacy_resume_early(dev); 699 return pci_legacy_resume_early(dev);
735 700
736 pci_pm_default_resume_noirq(pci_dev);
737
738 if (drv && drv->pm && drv->pm->restore_noirq) 701 if (drv && drv->pm && drv->pm->restore_noirq)
739 error = drv->pm->restore_noirq(dev); 702 error = drv->pm->restore_noirq(dev);
740 703
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c12f6c790698..17bd9325a245 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,7 +22,7 @@
22#include <asm/dma.h> /* isa_dma_bridge_buggy */ 22#include <asm/dma.h> /* isa_dma_bridge_buggy */
23#include "pci.h" 23#include "pci.h"
24 24
25unsigned int pci_pm_d3_delay = 10; 25unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
26 26
27#ifdef CONFIG_PCI_DOMAINS 27#ifdef CONFIG_PCI_DOMAINS
28int pci_domains_supported = 1; 28int pci_domains_supported = 1;
@@ -426,6 +426,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
426 * given PCI device 426 * given PCI device
427 * @dev: PCI device to handle. 427 * @dev: PCI device to handle.
428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. 428 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
429 * @wait: If 'true', wait for the device to change its power state
429 * 430 *
430 * RETURN VALUE: 431 * RETURN VALUE:
431 * -EINVAL if the requested state is invalid. 432 * -EINVAL if the requested state is invalid.
@@ -435,7 +436,7 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
435 * 0 if device's power state has been successfully changed. 436 * 0 if device's power state has been successfully changed.
436 */ 437 */
437static int 438static int
438pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) 439pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
439{ 440{
440 u16 pmcsr; 441 u16 pmcsr;
441 bool need_restore = false; 442 bool need_restore = false;
@@ -480,8 +481,10 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
480 break; 481 break;
481 case PCI_UNKNOWN: /* Boot-up */ 482 case PCI_UNKNOWN: /* Boot-up */
482 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot 483 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
483 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) 484 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
484 need_restore = true; 485 need_restore = true;
486 wait = true;
487 }
485 /* Fall-through: force to D0 */ 488 /* Fall-through: force to D0 */
486 default: 489 default:
487 pmcsr = 0; 490 pmcsr = 0;
@@ -491,12 +494,15 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
491 /* enter specified state */ 494 /* enter specified state */
492 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 495 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
493 496
497 if (!wait)
498 return 0;
499
494 /* Mandatory power management transition delays */ 500 /* Mandatory power management transition delays */
495 /* see PCI PM 1.1 5.6.1 table 18 */ 501 /* see PCI PM 1.1 5.6.1 table 18 */
496 if (state == PCI_D3hot || dev->current_state == PCI_D3hot) 502 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
497 msleep(pci_pm_d3_delay); 503 msleep(pci_pm_d3_delay);
498 else if (state == PCI_D2 || dev->current_state == PCI_D2) 504 else if (state == PCI_D2 || dev->current_state == PCI_D2)
499 udelay(200); 505 udelay(PCI_PM_D2_DELAY);
500 506
501 dev->current_state = state; 507 dev->current_state = state;
502 508
@@ -515,7 +521,7 @@ pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
515 if (need_restore) 521 if (need_restore)
516 pci_restore_bars(dev); 522 pci_restore_bars(dev);
517 523
518 if (dev->bus->self) 524 if (wait && dev->bus->self)
519 pcie_aspm_pm_state_change(dev->bus->self); 525 pcie_aspm_pm_state_change(dev->bus->self);
520 526
521 return 0; 527 return 0;
@@ -585,7 +591,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
585 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) 591 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
586 return 0; 592 return 0;
587 593
588 error = pci_raw_set_power_state(dev, state); 594 error = pci_raw_set_power_state(dev, state, true);
589 595
590 if (state > PCI_D0 && platform_pci_power_manageable(dev)) { 596 if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
591 /* Allow the platform to finalize the transition */ 597 /* Allow the platform to finalize the transition */
@@ -730,6 +736,7 @@ pci_save_state(struct pci_dev *dev)
730 /* XXX: 100% dword access ok here? */ 736 /* XXX: 100% dword access ok here? */
731 for (i = 0; i < 16; i++) 737 for (i = 0; i < 16; i++)
732 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 738 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
739 dev->state_saved = true;
733 if ((i = pci_save_pcie_state(dev)) != 0) 740 if ((i = pci_save_pcie_state(dev)) != 0)
734 return i; 741 return i;
735 if ((i = pci_save_pcix_state(dev)) != 0) 742 if ((i = pci_save_pcix_state(dev)) != 0)
@@ -1260,15 +1267,14 @@ void pci_pm_init(struct pci_dev *dev)
1260 /* find PCI PM capability in list */ 1267 /* find PCI PM capability in list */
1261 pm = pci_find_capability(dev, PCI_CAP_ID_PM); 1268 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1262 if (!pm) 1269 if (!pm)
1263 goto Exit; 1270 return;
1264
1265 /* Check device's ability to generate PME# */ 1271 /* Check device's ability to generate PME# */
1266 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); 1272 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1267 1273
1268 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { 1274 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1269 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", 1275 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1270 pmc & PCI_PM_CAP_VER_MASK); 1276 pmc & PCI_PM_CAP_VER_MASK);
1271 goto Exit; 1277 return;
1272 } 1278 }
1273 1279
1274 dev->pm_cap = pm; 1280 dev->pm_cap = pm;
@@ -1307,9 +1313,6 @@ void pci_pm_init(struct pci_dev *dev)
1307 } else { 1313 } else {
1308 dev->pme_support = 0; 1314 dev->pme_support = 0;
1309 } 1315 }
1310
1311 Exit:
1312 pci_update_current_state(dev, PCI_D0);
1313} 1316}
1314 1317
1315/** 1318/**
@@ -1378,6 +1381,50 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1378} 1381}
1379 1382
1380/** 1383/**
1384 * pci_restore_standard_config - restore standard config registers of PCI device
1385 * @dev: PCI device to handle
1386 *
1387 * This function assumes that the device's configuration space is accessible.
1388 * If the device needs to be powered up, the function will wait for it to
1389 * change the state.
1390 */
1391int pci_restore_standard_config(struct pci_dev *dev)
1392{
1393 pci_power_t prev_state;
1394 int error;
1395
1396 pci_restore_state(dev);
1397 pci_update_current_state(dev, PCI_D0);
1398
1399 prev_state = dev->current_state;
1400 if (prev_state == PCI_D0)
1401 return 0;
1402
1403 error = pci_raw_set_power_state(dev, PCI_D0, false);
1404 if (error)
1405 return error;
1406
1407 if (pci_is_bridge(dev)) {
1408 if (prev_state > PCI_D1)
1409 mdelay(PCI_PM_BUS_WAIT);
1410 } else {
1411 switch(prev_state) {
1412 case PCI_D3cold:
1413 case PCI_D3hot:
1414 mdelay(pci_pm_d3_delay);
1415 break;
1416 case PCI_D2:
1417 udelay(PCI_PM_D2_DELAY);
1418 break;
1419 }
1420 }
1421
1422 dev->current_state = PCI_D0;
1423
1424 return 0;
1425}
1426
1427/**
1381 * pci_enable_ari - enable ARI forwarding if hardware support it 1428 * pci_enable_ari - enable ARI forwarding if hardware support it
1382 * @dev: the PCI device 1429 * @dev: the PCI device
1383 */ 1430 */
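A hedged recap (not part of the patch) of the wait handling in the new pci_restore_standard_config() above: pci_raw_set_power_state() is called with wait == false, so the mandatory transition delay is applied here instead, chosen from the state the device was in before being forced back to D0:

	/*
	 *   bridge,   previous state above D1    -> mdelay(PCI_PM_BUS_WAIT)
	 *   function, previous state D3hot/D3cold -> mdelay(pci_pm_d3_delay)
	 *   function, previous state D2           -> udelay(PCI_PM_D2_DELAY)
	 *   previous state D0 or D1               -> no extra delay
	 */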
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1351bb4addde..26ddf78ac300 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -49,6 +49,12 @@ extern void pci_disable_enabled_device(struct pci_dev *dev);
49extern void pci_pm_init(struct pci_dev *dev); 49extern void pci_pm_init(struct pci_dev *dev);
50extern void platform_pci_wakeup_init(struct pci_dev *dev); 50extern void platform_pci_wakeup_init(struct pci_dev *dev);
51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev); 51extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
52extern int pci_restore_standard_config(struct pci_dev *dev);
53
54static inline bool pci_is_bridge(struct pci_dev *pci_dev)
55{
56 return !!(pci_dev->subordinate);
57}
52 58
53extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); 59extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
54extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); 60extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e65448e99b48..1a266d4ab5f1 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -54,6 +54,18 @@ config ASUS_LAPTOP
54 54
55 If you have an ACPI-compatible ASUS laptop, say Y or M here. 55 If you have an ACPI-compatible ASUS laptop, say Y or M here.
56 56
57config DELL_LAPTOP
58 tristate "Dell Laptop Extras (EXPERIMENTAL)"
59 depends on X86
60 depends on DCDBAS
61 depends on EXPERIMENTAL
62 depends on BACKLIGHT_CLASS_DEVICE
63 depends on RFKILL
64 default n
65 ---help---
66 This driver adds support for rfkill and backlight control to Dell
67 laptops.
68
57config FUJITSU_LAPTOP 69config FUJITSU_LAPTOP
58 tristate "Fujitsu Laptop Extras" 70 tristate "Fujitsu Laptop Extras"
59 depends on ACPI 71 depends on ACPI
@@ -192,6 +204,17 @@ config THINKPAD_ACPI
192 204
193 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here. 205 If you have an IBM or Lenovo ThinkPad laptop, say Y or M here.
194 206
207config THINKPAD_ACPI_DEBUGFACILITIES
208 bool "Maintainer debug facilities"
209 depends on THINKPAD_ACPI
210 default n
211 ---help---
212 Enables extra stuff in the thinkpad-acpi which is completely useless
213 for normal use. Read the driver source to find out what it does.
214
215 Say N here, unless you were told by a kernel maintainer to do
216 otherwise.
217
195config THINKPAD_ACPI_DEBUG 218config THINKPAD_ACPI_DEBUG
196 bool "Verbose debug mode" 219 bool "Verbose debug mode"
197 depends on THINKPAD_ACPI 220 depends on THINKPAD_ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 1e9de2ae0de5..e29065120be9 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o 6obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
7obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o 7obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
8obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o 8obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
9obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o
9obj-$(CONFIG_ACER_WMI) += acer-wmi.o 10obj-$(CONFIG_ACER_WMI) += acer-wmi.o
10obj-$(CONFIG_HP_WMI) += hp-wmi.o 11obj-$(CONFIG_HP_WMI) += hp-wmi.o
11obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o 12obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
diff --git a/drivers/misc/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 4d33a2068b7a..16e11c2ee19a 100644
--- a/drivers/misc/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -22,7 +22,7 @@
22#include <linux/rfkill.h> 22#include <linux/rfkill.h>
23#include <linux/power_supply.h> 23#include <linux/power_supply.h>
24#include <linux/acpi.h> 24#include <linux/acpi.h>
25#include "../firmware/dcdbas.h" 25#include "../../firmware/dcdbas.h"
26 26
27#define BRIGHTNESS_TOKEN 0x7d 27#define BRIGHTNESS_TOKEN 0x7d
28 28
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 02fe2b8b8939..9d93cb971e59 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -90,7 +90,7 @@ enum {
90}; 90};
91 91
92static const char *cm_getv[] = { 92static const char *cm_getv[] = {
93 "WLDG", NULL, NULL, NULL, 93 "WLDG", "BTHG", NULL, NULL,
94 "CAMG", NULL, NULL, NULL, 94 "CAMG", NULL, NULL, NULL,
95 NULL, "PBLG", NULL, NULL, 95 NULL, "PBLG", NULL, NULL,
96 "CFVG", NULL, NULL, NULL, 96 "CFVG", NULL, NULL, NULL,
@@ -99,7 +99,7 @@ static const char *cm_getv[] = {
99}; 99};
100 100
101static const char *cm_setv[] = { 101static const char *cm_setv[] = {
102 "WLDS", NULL, NULL, NULL, 102 "WLDS", "BTHS", NULL, NULL,
103 "CAMS", NULL, NULL, NULL, 103 "CAMS", NULL, NULL, NULL,
104 "SDSP", "PBLS", "HDPS", NULL, 104 "SDSP", "PBLS", "HDPS", NULL,
105 "CFVS", NULL, NULL, NULL, 105 "CFVS", NULL, NULL, NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3478453eba7a..bcbc05107ba8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -21,7 +21,7 @@
21 * 02110-1301, USA. 21 * 02110-1301, USA.
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.21" 24#define TPACPI_VERSION "0.22"
25#define TPACPI_SYSFS_VERSION 0x020200 25#define TPACPI_SYSFS_VERSION 0x020200
26 26
27/* 27/*
@@ -122,6 +122,27 @@ enum {
122#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */ 122#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
123#define TPACPI_HKEY_INPUT_VERSION 0x4101 123#define TPACPI_HKEY_INPUT_VERSION 0x4101
124 124
125/* ACPI \WGSV commands */
126enum {
127 TP_ACPI_WGSV_GET_STATE = 0x01, /* Get state information */
128 TP_ACPI_WGSV_PWR_ON_ON_RESUME = 0x02, /* Resume WWAN powered on */
129 TP_ACPI_WGSV_PWR_OFF_ON_RESUME = 0x03, /* Resume WWAN powered off */
130 TP_ACPI_WGSV_SAVE_STATE = 0x04, /* Save state for S4/S5 */
131};
132
133/* TP_ACPI_WGSV_GET_STATE bits */
134enum {
135 TP_ACPI_WGSV_STATE_WWANEXIST = 0x0001, /* WWAN hw available */
136 TP_ACPI_WGSV_STATE_WWANPWR = 0x0002, /* WWAN radio enabled */
137 TP_ACPI_WGSV_STATE_WWANPWRRES = 0x0004, /* WWAN state at resume */
138 TP_ACPI_WGSV_STATE_WWANBIOSOFF = 0x0008, /* WWAN disabled in BIOS */
139 TP_ACPI_WGSV_STATE_BLTHEXIST = 0x0001, /* BLTH hw available */
140 TP_ACPI_WGSV_STATE_BLTHPWR = 0x0002, /* BLTH radio enabled */
141 TP_ACPI_WGSV_STATE_BLTHPWRRES = 0x0004, /* BLTH state at resume */
142 TP_ACPI_WGSV_STATE_BLTHBIOSOFF = 0x0008, /* BLTH disabled in BIOS */
143 TP_ACPI_WGSV_STATE_UWBEXIST = 0x0010, /* UWB hw available */
144 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
145};
125 146
126/**************************************************************************** 147/****************************************************************************
127 * Main driver 148 * Main driver
@@ -148,14 +169,17 @@ enum {
148enum { 169enum {
149 TPACPI_RFK_BLUETOOTH_SW_ID = 0, 170 TPACPI_RFK_BLUETOOTH_SW_ID = 0,
150 TPACPI_RFK_WWAN_SW_ID, 171 TPACPI_RFK_WWAN_SW_ID,
172 TPACPI_RFK_UWB_SW_ID,
151}; 173};
152 174
153/* Debugging */ 175/* Debugging */
154#define TPACPI_LOG TPACPI_FILE ": " 176#define TPACPI_LOG TPACPI_FILE ": "
155#define TPACPI_ERR KERN_ERR TPACPI_LOG 177#define TPACPI_ALERT KERN_ALERT TPACPI_LOG
156#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG 178#define TPACPI_CRIT KERN_CRIT TPACPI_LOG
157#define TPACPI_INFO KERN_INFO TPACPI_LOG 179#define TPACPI_ERR KERN_ERR TPACPI_LOG
158#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG 180#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
181#define TPACPI_INFO KERN_INFO TPACPI_LOG
182#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
159 183
160#define TPACPI_DBG_ALL 0xffff 184#define TPACPI_DBG_ALL 0xffff
161#define TPACPI_DBG_INIT 0x0001 185#define TPACPI_DBG_INIT 0x0001
@@ -201,6 +225,7 @@ struct ibm_struct {
201 void (*exit) (void); 225 void (*exit) (void);
202 void (*resume) (void); 226 void (*resume) (void);
203 void (*suspend) (pm_message_t state); 227 void (*suspend) (pm_message_t state);
228 void (*shutdown) (void);
204 229
205 struct list_head all_drivers; 230 struct list_head all_drivers;
206 231
@@ -239,6 +264,7 @@ static struct {
239 u32 bright_16levels:1; 264 u32 bright_16levels:1;
240 u32 bright_acpimode:1; 265 u32 bright_acpimode:1;
241 u32 wan:1; 266 u32 wan:1;
267 u32 uwb:1;
242 u32 fan_ctrl_status_undef:1; 268 u32 fan_ctrl_status_undef:1;
243 u32 input_device_registered:1; 269 u32 input_device_registered:1;
244 u32 platform_drv_registered:1; 270 u32 platform_drv_registered:1;
@@ -288,6 +314,18 @@ struct tpacpi_led_classdev {
288 unsigned int led; 314 unsigned int led;
289}; 315};
290 316
317#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
318static int dbg_wlswemul;
319static int tpacpi_wlsw_emulstate;
320static int dbg_bluetoothemul;
321static int tpacpi_bluetooth_emulstate;
322static int dbg_wwanemul;
323static int tpacpi_wwan_emulstate;
324static int dbg_uwbemul;
325static int tpacpi_uwb_emulstate;
326#endif
327
328
291/**************************************************************************** 329/****************************************************************************
292 **************************************************************************** 330 ****************************************************************************
293 * 331 *
@@ -728,6 +766,18 @@ static int tpacpi_resume_handler(struct platform_device *pdev)
728 return 0; 766 return 0;
729} 767}
730 768
769static void tpacpi_shutdown_handler(struct platform_device *pdev)
770{
771 struct ibm_struct *ibm, *itmp;
772
773 list_for_each_entry_safe(ibm, itmp,
774 &tpacpi_all_drivers,
775 all_drivers) {
776 if (ibm->shutdown)
777 (ibm->shutdown)();
778 }
779}
780
731static struct platform_driver tpacpi_pdriver = { 781static struct platform_driver tpacpi_pdriver = {
732 .driver = { 782 .driver = {
733 .name = TPACPI_DRVR_NAME, 783 .name = TPACPI_DRVR_NAME,
@@ -735,6 +785,7 @@ static struct platform_driver tpacpi_pdriver = {
735 }, 785 },
736 .suspend = tpacpi_suspend_handler, 786 .suspend = tpacpi_suspend_handler,
737 .resume = tpacpi_resume_handler, 787 .resume = tpacpi_resume_handler,
788 .shutdown = tpacpi_shutdown_handler,
738}; 789};
739 790
740static struct platform_driver tpacpi_hwmon_pdriver = { 791static struct platform_driver tpacpi_hwmon_pdriver = {
@@ -922,11 +973,27 @@ static int __init tpacpi_new_rfkill(const unsigned int id,
922 struct rfkill **rfk, 973 struct rfkill **rfk,
923 const enum rfkill_type rfktype, 974 const enum rfkill_type rfktype,
924 const char *name, 975 const char *name,
976 const bool set_default,
925 int (*toggle_radio)(void *, enum rfkill_state), 977 int (*toggle_radio)(void *, enum rfkill_state),
926 int (*get_state)(void *, enum rfkill_state *)) 978 int (*get_state)(void *, enum rfkill_state *))
927{ 979{
928 int res; 980 int res;
929 enum rfkill_state initial_state; 981 enum rfkill_state initial_state = RFKILL_STATE_SOFT_BLOCKED;
982
983 res = get_state(NULL, &initial_state);
984 if (res < 0) {
985 printk(TPACPI_ERR
986 "failed to read initial state for %s, error %d; "
987 "will turn radio off\n", name, res);
988 } else if (set_default) {
989 /* try to set the initial state as the default for the rfkill
990 * type, since we ask the firmware to preserve it across S5 in
991 * NVRAM */
992 rfkill_set_default(rfktype,
993 (initial_state == RFKILL_STATE_UNBLOCKED) ?
994 RFKILL_STATE_UNBLOCKED :
995 RFKILL_STATE_SOFT_BLOCKED);
996 }
930 997
931 *rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype); 998 *rfk = rfkill_allocate(&tpacpi_pdev->dev, rfktype);
932 if (!*rfk) { 999 if (!*rfk) {
@@ -938,9 +1005,7 @@ static int __init tpacpi_new_rfkill(const unsigned int id,
938 (*rfk)->name = name; 1005 (*rfk)->name = name;
939 (*rfk)->get_state = get_state; 1006 (*rfk)->get_state = get_state;
940 (*rfk)->toggle_radio = toggle_radio; 1007 (*rfk)->toggle_radio = toggle_radio;
941 1008 (*rfk)->state = initial_state;
942 if (!get_state(NULL, &initial_state))
943 (*rfk)->state = initial_state;
944 1009
945 res = rfkill_register(*rfk); 1010 res = rfkill_register(*rfk);
946 if (res < 0) { 1011 if (res < 0) {
@@ -1006,6 +1071,119 @@ static DRIVER_ATTR(version, S_IRUGO,
1006 1071
1007/* --------------------------------------------------------------------- */ 1072/* --------------------------------------------------------------------- */
1008 1073
1074#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
1075
1076static void tpacpi_send_radiosw_update(void);
1077
1078/* wlsw_emulstate ------------------------------------------------------ */
1079static ssize_t tpacpi_driver_wlsw_emulstate_show(struct device_driver *drv,
1080 char *buf)
1081{
1082 return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wlsw_emulstate);
1083}
1084
1085static ssize_t tpacpi_driver_wlsw_emulstate_store(struct device_driver *drv,
1086 const char *buf, size_t count)
1087{
1088 unsigned long t;
1089
1090 if (parse_strtoul(buf, 1, &t))
1091 return -EINVAL;
1092
1093 if (tpacpi_wlsw_emulstate != t) {
1094 tpacpi_wlsw_emulstate = !!t;
1095 tpacpi_send_radiosw_update();
1096 } else
1097 tpacpi_wlsw_emulstate = !!t;
1098
1099 return count;
1100}
1101
1102static DRIVER_ATTR(wlsw_emulstate, S_IWUSR | S_IRUGO,
1103 tpacpi_driver_wlsw_emulstate_show,
1104 tpacpi_driver_wlsw_emulstate_store);
1105
1106/* bluetooth_emulstate ------------------------------------------------- */
1107static ssize_t tpacpi_driver_bluetooth_emulstate_show(
1108 struct device_driver *drv,
1109 char *buf)
1110{
1111 return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_bluetooth_emulstate);
1112}
1113
1114static ssize_t tpacpi_driver_bluetooth_emulstate_store(
1115 struct device_driver *drv,
1116 const char *buf, size_t count)
1117{
1118 unsigned long t;
1119
1120 if (parse_strtoul(buf, 1, &t))
1121 return -EINVAL;
1122
1123 tpacpi_bluetooth_emulstate = !!t;
1124
1125 return count;
1126}
1127
1128static DRIVER_ATTR(bluetooth_emulstate, S_IWUSR | S_IRUGO,
1129 tpacpi_driver_bluetooth_emulstate_show,
1130 tpacpi_driver_bluetooth_emulstate_store);
1131
1132/* wwan_emulstate ------------------------------------------------- */
1133static ssize_t tpacpi_driver_wwan_emulstate_show(
1134 struct device_driver *drv,
1135 char *buf)
1136{
1137 return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wwan_emulstate);
1138}
1139
1140static ssize_t tpacpi_driver_wwan_emulstate_store(
1141 struct device_driver *drv,
1142 const char *buf, size_t count)
1143{
1144 unsigned long t;
1145
1146 if (parse_strtoul(buf, 1, &t))
1147 return -EINVAL;
1148
1149 tpacpi_wwan_emulstate = !!t;
1150
1151 return count;
1152}
1153
1154static DRIVER_ATTR(wwan_emulstate, S_IWUSR | S_IRUGO,
1155 tpacpi_driver_wwan_emulstate_show,
1156 tpacpi_driver_wwan_emulstate_store);
1157
1158/* uwb_emulstate ------------------------------------------------- */
1159static ssize_t tpacpi_driver_uwb_emulstate_show(
1160 struct device_driver *drv,
1161 char *buf)
1162{
1163 return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_uwb_emulstate);
1164}
1165
1166static ssize_t tpacpi_driver_uwb_emulstate_store(
1167 struct device_driver *drv,
1168 const char *buf, size_t count)
1169{
1170 unsigned long t;
1171
1172 if (parse_strtoul(buf, 1, &t))
1173 return -EINVAL;
1174
1175 tpacpi_uwb_emulstate = !!t;
1176
1177 return count;
1178}
1179
1180static DRIVER_ATTR(uwb_emulstate, S_IWUSR | S_IRUGO,
1181 tpacpi_driver_uwb_emulstate_show,
1182 tpacpi_driver_uwb_emulstate_store);
1183#endif
1184
1185/* --------------------------------------------------------------------- */
1186
1009static struct driver_attribute *tpacpi_driver_attributes[] = { 1187static struct driver_attribute *tpacpi_driver_attributes[] = {
1010 &driver_attr_debug_level, &driver_attr_version, 1188 &driver_attr_debug_level, &driver_attr_version,
1011 &driver_attr_interface_version, 1189 &driver_attr_interface_version,
@@ -1022,6 +1200,17 @@ static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
1022 i++; 1200 i++;
1023 } 1201 }
1024 1202
1203#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
1204 if (!res && dbg_wlswemul)
1205 res = driver_create_file(drv, &driver_attr_wlsw_emulstate);
1206 if (!res && dbg_bluetoothemul)
1207 res = driver_create_file(drv, &driver_attr_bluetooth_emulstate);
1208 if (!res && dbg_wwanemul)
1209 res = driver_create_file(drv, &driver_attr_wwan_emulstate);
1210 if (!res && dbg_uwbemul)
1211 res = driver_create_file(drv, &driver_attr_uwb_emulstate);
1212#endif
1213
1025 return res; 1214 return res;
1026} 1215}
1027 1216
@@ -1031,6 +1220,13 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
1031 1220
1032 for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++) 1221 for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
1033 driver_remove_file(drv, tpacpi_driver_attributes[i]); 1222 driver_remove_file(drv, tpacpi_driver_attributes[i]);
1223
1224#ifdef THINKPAD_ACPI_DEBUGFACILITIES
1225 driver_remove_file(drv, &driver_attr_wlsw_emulstate);
1226 driver_remove_file(drv, &driver_attr_bluetooth_emulstate);
1227 driver_remove_file(drv, &driver_attr_wwan_emulstate);
1228 driver_remove_file(drv, &driver_attr_uwb_emulstate);
1229#endif
1034} 1230}
1035 1231
1036/**************************************************************************** 1232/****************************************************************************
@@ -1216,6 +1412,12 @@ static struct attribute_set *hotkey_dev_attributes;
1216 1412
1217static int hotkey_get_wlsw(int *status) 1413static int hotkey_get_wlsw(int *status)
1218{ 1414{
1415#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
1416 if (dbg_wlswemul) {
1417 *status = !!tpacpi_wlsw_emulstate;
1418 return 0;
1419 }
1420#endif
1219 if (!acpi_evalf(hkey_handle, status, "WLSW", "d")) 1421 if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
1220 return -EIO; 1422 return -EIO;
1221 return 0; 1423 return 0;
@@ -1678,7 +1880,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
1678{ 1880{
1679 int res; 1881 int res;
1680 1882
1681 if (mutex_lock_interruptible(&hotkey_mutex)) 1883 if (mutex_lock_killable(&hotkey_mutex))
1682 return -ERESTARTSYS; 1884 return -ERESTARTSYS;
1683 res = hotkey_mask_get(); 1885 res = hotkey_mask_get();
1684 mutex_unlock(&hotkey_mutex); 1886 mutex_unlock(&hotkey_mutex);
@@ -1697,7 +1899,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
1697 if (parse_strtoul(buf, 0xffffffffUL, &t)) 1899 if (parse_strtoul(buf, 0xffffffffUL, &t))
1698 return -EINVAL; 1900 return -EINVAL;
1699 1901
1700 if (mutex_lock_interruptible(&hotkey_mutex)) 1902 if (mutex_lock_killable(&hotkey_mutex))
1701 return -ERESTARTSYS; 1903 return -ERESTARTSYS;
1702 1904
1703 res = hotkey_mask_set(t); 1905 res = hotkey_mask_set(t);
@@ -1783,7 +1985,7 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
1783 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0)) 1985 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
1784 return -EINVAL; 1986 return -EINVAL;
1785 1987
1786 if (mutex_lock_interruptible(&hotkey_mutex)) 1988 if (mutex_lock_killable(&hotkey_mutex))
1787 return -ERESTARTSYS; 1989 return -ERESTARTSYS;
1788 1990
1789 HOTKEY_CONFIG_CRITICAL_START 1991 HOTKEY_CONFIG_CRITICAL_START
@@ -1818,7 +2020,7 @@ static ssize_t hotkey_poll_freq_store(struct device *dev,
1818 if (parse_strtoul(buf, 25, &t)) 2020 if (parse_strtoul(buf, 25, &t))
1819 return -EINVAL; 2021 return -EINVAL;
1820 2022
1821 if (mutex_lock_interruptible(&hotkey_mutex)) 2023 if (mutex_lock_killable(&hotkey_mutex))
1822 return -ERESTARTSYS; 2024 return -ERESTARTSYS;
1823 2025
1824 hotkey_poll_freq = t; 2026 hotkey_poll_freq = t;
@@ -1958,6 +2160,7 @@ static struct attribute *hotkey_mask_attributes[] __initdata = {
1958 2160
1959static void bluetooth_update_rfk(void); 2161static void bluetooth_update_rfk(void);
1960static void wan_update_rfk(void); 2162static void wan_update_rfk(void);
2163static void uwb_update_rfk(void);
1961static void tpacpi_send_radiosw_update(void) 2164static void tpacpi_send_radiosw_update(void)
1962{ 2165{
1963 int wlsw; 2166 int wlsw;
@@ -1967,6 +2170,8 @@ static void tpacpi_send_radiosw_update(void)
1967 bluetooth_update_rfk(); 2170 bluetooth_update_rfk();
1968 if (tp_features.wan) 2171 if (tp_features.wan)
1969 wan_update_rfk(); 2172 wan_update_rfk();
2173 if (tp_features.uwb)
2174 uwb_update_rfk();
1970 2175
1971 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) { 2176 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
1972 mutex_lock(&tpacpi_inputdev_send_mutex); 2177 mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -2222,6 +2427,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2222 hotkey_source_mask, hotkey_poll_freq); 2427 hotkey_source_mask, hotkey_poll_freq);
2223#endif 2428#endif
2224 2429
2430#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
2431 if (dbg_wlswemul) {
2432 tp_features.hotkey_wlsw = 1;
2433 printk(TPACPI_INFO
2434 "radio switch emulation enabled\n");
2435 } else
2436#endif
2225 /* Not all thinkpads have a hardware radio switch */ 2437 /* Not all thinkpads have a hardware radio switch */
2226 if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) { 2438 if (acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
2227 tp_features.hotkey_wlsw = 1; 2439 tp_features.hotkey_wlsw = 1;
@@ -2361,13 +2573,154 @@ err_exit:
2361 return (res < 0)? res : 1; 2573 return (res < 0)? res : 1;
2362} 2574}
2363 2575
2576static bool hotkey_notify_hotkey(const u32 hkey,
2577 bool *send_acpi_ev,
2578 bool *ignore_acpi_ev)
2579{
2580 /* 0x1000-0x1FFF: key presses */
2581 unsigned int scancode = hkey & 0xfff;
2582 *send_acpi_ev = true;
2583 *ignore_acpi_ev = false;
2584
2585 if (scancode > 0 && scancode < 0x21) {
2586 scancode--;
2587 if (!(hotkey_source_mask & (1 << scancode))) {
2588 tpacpi_input_send_key(scancode);
2589 *send_acpi_ev = false;
2590 } else {
2591 *ignore_acpi_ev = true;
2592 }
2593 return true;
2594 }
2595 return false;
2596}
2597
2598static bool hotkey_notify_wakeup(const u32 hkey,
2599 bool *send_acpi_ev,
2600 bool *ignore_acpi_ev)
2601{
2602 /* 0x2000-0x2FFF: Wakeup reason */
2603 *send_acpi_ev = true;
2604 *ignore_acpi_ev = false;
2605
2606 switch (hkey) {
2607 case 0x2304: /* suspend, undock */
2608 case 0x2404: /* hibernation, undock */
2609 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
2610 *ignore_acpi_ev = true;
2611 break;
2612
2613 case 0x2305: /* suspend, bay eject */
2614 case 0x2405: /* hibernation, bay eject */
2615 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
2616 *ignore_acpi_ev = true;
2617 break;
2618
2619 case 0x2313: /* Battery on critical low level (S3) */
2620 case 0x2413: /* Battery on critical low level (S4) */
2621 printk(TPACPI_ALERT
2622 "EMERGENCY WAKEUP: battery almost empty\n");
2623 /* how to auto-heal: */
2624 /* 2313: woke up from S3, go to S4/S5 */
2625 /* 2413: woke up from S4, go to S5 */
2626 break;
2627
2628 default:
2629 return false;
2630 }
2631
2632 if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
2633 printk(TPACPI_INFO
2634 "woke up due to a hot-unplug "
2635 "request...\n");
2636 hotkey_wakeup_reason_notify_change();
2637 }
2638 return true;
2639}
2640
2641static bool hotkey_notify_usrevent(const u32 hkey,
2642 bool *send_acpi_ev,
2643 bool *ignore_acpi_ev)
2644{
2645 /* 0x5000-0x5FFF: human interface helpers */
2646 *send_acpi_ev = true;
2647 *ignore_acpi_ev = false;
2648
2649 switch (hkey) {
2650 case 0x5010: /* Lenovo new BIOS: brightness changed */
2651 case 0x500b: /* X61t: tablet pen inserted into bay */
2652 case 0x500c: /* X61t: tablet pen removed from bay */
2653 return true;
2654
2655 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */
2656 case 0x500a: /* X41t-X61t: swivel down (normal mode) */
2657 tpacpi_input_send_tabletsw();
2658 hotkey_tablet_mode_notify_change();
2659 *send_acpi_ev = false;
2660 return true;
2661
2662 case 0x5001:
2663 case 0x5002:
2664 /* LID switch events. Do not propagate */
2665 *ignore_acpi_ev = true;
2666 return true;
2667
2668 default:
2669 return false;
2670 }
2671}
2672
2673static bool hotkey_notify_thermal(const u32 hkey,
2674 bool *send_acpi_ev,
2675 bool *ignore_acpi_ev)
2676{
2677 /* 0x6000-0x6FFF: thermal alarms */
2678 *send_acpi_ev = true;
2679 *ignore_acpi_ev = false;
2680
2681 switch (hkey) {
2682 case 0x6011:
2683 printk(TPACPI_CRIT
2684 "THERMAL ALARM: battery is too hot!\n");
2685 /* recommended action: warn user through gui */
2686 return true;
2687 case 0x6012:
2688 printk(TPACPI_ALERT
2689 "THERMAL EMERGENCY: battery is extremely hot!\n");
2690 /* recommended action: immediate sleep/hibernate */
2691 return true;
2692 case 0x6021:
2693 printk(TPACPI_CRIT
2694 "THERMAL ALARM: "
2695 "a sensor reports something is too hot!\n");
2696 /* recommended action: warn user through gui, that */
2697 /* some internal component is too hot */
2698 return true;
2699 case 0x6022:
2700 printk(TPACPI_ALERT
2701 "THERMAL EMERGENCY: "
2702 "a sensor reports something is extremely hot!\n");
2703 /* recommended action: immediate sleep/hibernate */
2704 return true;
2705 case 0x6030:
2706 printk(TPACPI_INFO
2707 "EC reports that Thermal Table has changed\n");
2708 /* recommended action: do nothing, we don't have
2709 * Lenovo ATM information */
2710 return true;
2711 default:
2712 printk(TPACPI_ALERT
2713 "THERMAL ALERT: unknown thermal alarm received\n");
2714 return false;
2715 }
2716}
2717
2364static void hotkey_notify(struct ibm_struct *ibm, u32 event) 2718static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2365{ 2719{
2366 u32 hkey; 2720 u32 hkey;
2367 unsigned int scancode; 2721 bool send_acpi_ev;
2368 int send_acpi_ev; 2722 bool ignore_acpi_ev;
2369 int ignore_acpi_ev; 2723 bool known_ev;
2370 int unk_ev;
2371 2724
2372 if (event != 0x80) { 2725 if (event != 0x80) {
2373 printk(TPACPI_ERR 2726 printk(TPACPI_ERR
@@ -2375,7 +2728,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2375 /* forward it to userspace, maybe it knows how to handle it */ 2728 /* forward it to userspace, maybe it knows how to handle it */
2376 acpi_bus_generate_netlink_event( 2729 acpi_bus_generate_netlink_event(
2377 ibm->acpi->device->pnp.device_class, 2730 ibm->acpi->device->pnp.device_class,
2378 ibm->acpi->device->dev.bus_id, 2731 dev_name(&ibm->acpi->device->dev),
2379 event, 0); 2732 event, 0);
2380 return; 2733 return;
2381 } 2734 }
@@ -2391,107 +2744,72 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2391 return; 2744 return;
2392 } 2745 }
2393 2746
2394 send_acpi_ev = 1; 2747 send_acpi_ev = true;
2395 ignore_acpi_ev = 0; 2748 ignore_acpi_ev = false;
2396 unk_ev = 0;
2397 2749
2398 switch (hkey >> 12) { 2750 switch (hkey >> 12) {
2399 case 1: 2751 case 1:
2400 /* 0x1000-0x1FFF: key presses */ 2752 /* 0x1000-0x1FFF: key presses */
2401 scancode = hkey & 0xfff; 2753 known_ev = hotkey_notify_hotkey(hkey, &send_acpi_ev,
2402 if (scancode > 0 && scancode < 0x21) { 2754 &ignore_acpi_ev);
2403 scancode--;
2404 if (!(hotkey_source_mask & (1 << scancode))) {
2405 tpacpi_input_send_key(scancode);
2406 send_acpi_ev = 0;
2407 } else {
2408 ignore_acpi_ev = 1;
2409 }
2410 } else {
2411 unk_ev = 1;
2412 }
2413 break; 2755 break;
2414 case 2: 2756 case 2:
2415 /* Wakeup reason */ 2757 /* 0x2000-0x2FFF: Wakeup reason */
2416 switch (hkey) { 2758 known_ev = hotkey_notify_wakeup(hkey, &send_acpi_ev,
2417 case 0x2304: /* suspend, undock */ 2759 &ignore_acpi_ev);
2418 case 0x2404: /* hibernation, undock */
2419 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
2420 ignore_acpi_ev = 1;
2421 break;
2422 case 0x2305: /* suspend, bay eject */
2423 case 0x2405: /* hibernation, bay eject */
2424 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
2425 ignore_acpi_ev = 1;
2426 break;
2427 default:
2428 unk_ev = 1;
2429 }
2430 if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
2431 printk(TPACPI_INFO
2432 "woke up due to a hot-unplug "
2433 "request...\n");
2434 hotkey_wakeup_reason_notify_change();
2435 }
2436 break; 2760 break;
2437 case 3: 2761 case 3:
2438 /* bay-related wakeups */ 2762 /* 0x3000-0x3FFF: bay-related wakeups */
2439 if (hkey == 0x3003) { 2763 if (hkey == 0x3003) {
2440 hotkey_autosleep_ack = 1; 2764 hotkey_autosleep_ack = 1;
2441 printk(TPACPI_INFO 2765 printk(TPACPI_INFO
2442 "bay ejected\n"); 2766 "bay ejected\n");
2443 hotkey_wakeup_hotunplug_complete_notify_change(); 2767 hotkey_wakeup_hotunplug_complete_notify_change();
2768 known_ev = true;
2444 } else { 2769 } else {
2445 unk_ev = 1; 2770 known_ev = false;
2446 } 2771 }
2447 break; 2772 break;
2448 case 4: 2773 case 4:
2449 /* dock-related wakeups */ 2774 /* 0x4000-0x4FFF: dock-related wakeups */
2450 if (hkey == 0x4003) { 2775 if (hkey == 0x4003) {
2451 hotkey_autosleep_ack = 1; 2776 hotkey_autosleep_ack = 1;
2452 printk(TPACPI_INFO 2777 printk(TPACPI_INFO
2453 "undocked\n"); 2778 "undocked\n");
2454 hotkey_wakeup_hotunplug_complete_notify_change(); 2779 hotkey_wakeup_hotunplug_complete_notify_change();
2780 known_ev = true;
2455 } else { 2781 } else {
2456 unk_ev = 1; 2782 known_ev = false;
2457 } 2783 }
2458 break; 2784 break;
2459 case 5: 2785 case 5:
2460 /* 0x5000-0x5FFF: human interface helpers */ 2786 /* 0x5000-0x5FFF: human interface helpers */
2461 switch (hkey) { 2787 known_ev = hotkey_notify_usrevent(hkey, &send_acpi_ev,
2462 case 0x5010: /* Lenovo new BIOS: brightness changed */ 2788 &ignore_acpi_ev);
2463 case 0x500b: /* X61t: tablet pen inserted into bay */ 2789 break;
2464 case 0x500c: /* X61t: tablet pen removed from bay */ 2790 case 6:
2465 break; 2791 /* 0x6000-0x6FFF: thermal alarms */
2466 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */ 2792 known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
2467 case 0x500a: /* X41t-X61t: swivel down (normal mode) */ 2793 &ignore_acpi_ev);
2468 tpacpi_input_send_tabletsw();
2469 hotkey_tablet_mode_notify_change();
2470 send_acpi_ev = 0;
2471 break;
2472 case 0x5001:
2473 case 0x5002:
2474 /* LID switch events. Do not propagate */
2475 ignore_acpi_ev = 1;
2476 break;
2477 default:
2478 unk_ev = 1;
2479 }
2480 break; 2794 break;
2481 case 7: 2795 case 7:
2482 /* 0x7000-0x7FFF: misc */ 2796 /* 0x7000-0x7FFF: misc */
2483 if (tp_features.hotkey_wlsw && hkey == 0x7000) { 2797 if (tp_features.hotkey_wlsw && hkey == 0x7000) {
2484 tpacpi_send_radiosw_update(); 2798 tpacpi_send_radiosw_update();
2485 send_acpi_ev = 0; 2799 send_acpi_ev = 0;
2800 known_ev = true;
2486 break; 2801 break;
2487 } 2802 }
2488 /* fallthrough to default */ 2803 /* fallthrough to default */
2489 default: 2804 default:
2490 unk_ev = 1; 2805 known_ev = false;
2491 } 2806 }
2492 if (unk_ev) { 2807 if (!known_ev) {
2493 printk(TPACPI_NOTICE 2808 printk(TPACPI_NOTICE
2494 "unhandled HKEY event 0x%04x\n", hkey); 2809 "unhandled HKEY event 0x%04x\n", hkey);
2810 printk(TPACPI_NOTICE
2811 "please report the conditions when this "
2812 "event happened to %s\n", TPACPI_MAIL);
2495 } 2813 }
2496 2814
2497 /* Legacy events */ 2815 /* Legacy events */
@@ -2505,7 +2823,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
2505 if (!ignore_acpi_ev && send_acpi_ev) { 2823 if (!ignore_acpi_ev && send_acpi_ev) {
2506 acpi_bus_generate_netlink_event( 2824 acpi_bus_generate_netlink_event(
2507 ibm->acpi->device->pnp.device_class, 2825 ibm->acpi->device->pnp.device_class,
2508 ibm->acpi->device->dev.bus_id, 2826 dev_name(&ibm->acpi->device->dev),
2509 event, hkey); 2827 event, hkey);
2510 } 2828 }
2511 } 2829 }
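The hunk above replaces the single large switch in hotkey_notify() with per-class helpers that each report whether they recognized the event and how it should be propagated; supporting a new 0xN000 event class now means one more bool-returning helper plus one case label. A condensed sketch of that shape, with hypothetical helper names:

/* each helper covers one 0xN000 class: it returns true when the event
 * code is known and tells the caller how to forward it */
static bool handle_keypress(u32 hkey, bool *send_acpi_ev, bool *ignore_acpi_ev);
static bool handle_thermal(u32 hkey, bool *send_acpi_ev, bool *ignore_acpi_ev);

static void dispatch_hkey(u32 hkey)
{
        bool send_acpi_ev = true;
        bool ignore_acpi_ev = false;
        bool known_ev;

        switch (hkey >> 12) {
        case 1:
                known_ev = handle_keypress(hkey, &send_acpi_ev,
                                           &ignore_acpi_ev);
                break;
        case 6:
                known_ev = handle_thermal(hkey, &send_acpi_ev,
                                          &ignore_acpi_ev);
                break;
        default:
                known_ev = false;
        }

        if (!known_ev)
                pr_notice("unhandled HKEY event 0x%04x\n", hkey);
        /* the real function then forwards the event over netlink unless
         * send_acpi_ev/ignore_acpi_ev say otherwise */
}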
@@ -2544,7 +2862,7 @@ static int hotkey_read(char *p)
2544 return len; 2862 return len;
2545 } 2863 }
2546 2864
2547 if (mutex_lock_interruptible(&hotkey_mutex)) 2865 if (mutex_lock_killable(&hotkey_mutex))
2548 return -ERESTARTSYS; 2866 return -ERESTARTSYS;
2549 res = hotkey_status_get(&status); 2867 res = hotkey_status_get(&status);
2550 if (!res) 2868 if (!res)
@@ -2575,7 +2893,7 @@ static int hotkey_write(char *buf)
2575 if (!tp_features.hotkey) 2893 if (!tp_features.hotkey)
2576 return -ENODEV; 2894 return -ENODEV;
2577 2895
2578 if (mutex_lock_interruptible(&hotkey_mutex)) 2896 if (mutex_lock_killable(&hotkey_mutex))
2579 return -ERESTARTSYS; 2897 return -ERESTARTSYS;
2580 2898
2581 status = -1; 2899 status = -1;
@@ -2640,11 +2958,28 @@ enum {
2640 /* ACPI GBDC/SBDC bits */ 2958 /* ACPI GBDC/SBDC bits */
2641 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */ 2959 TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
2642 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */ 2960 TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
2643 TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */ 2961 TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
2962 off / last state */
2963};
2964
2965enum {
2966 /* ACPI \BLTH commands */
2967 TP_ACPI_BLTH_GET_ULTRAPORT_ID = 0x00, /* Get Ultraport BT ID */
2968 TP_ACPI_BLTH_GET_PWR_ON_RESUME = 0x01, /* Get power-on-resume state */
2969 TP_ACPI_BLTH_PWR_ON_ON_RESUME = 0x02, /* Resume powered on */
2970 TP_ACPI_BLTH_PWR_OFF_ON_RESUME = 0x03, /* Resume powered off */
2971 TP_ACPI_BLTH_SAVE_STATE = 0x05, /* Save state for S4/S5 */
2644}; 2972};
2645 2973
2646static struct rfkill *tpacpi_bluetooth_rfkill; 2974static struct rfkill *tpacpi_bluetooth_rfkill;
2647 2975
2976static void bluetooth_suspend(pm_message_t state)
2977{
2978 /* Try to make sure radio will resume powered off */
2979 acpi_evalf(NULL, NULL, "\\BLTH", "vd",
2980 TP_ACPI_BLTH_PWR_OFF_ON_RESUME);
2981}
2982
2648static int bluetooth_get_radiosw(void) 2983static int bluetooth_get_radiosw(void)
2649{ 2984{
2650 int status; 2985 int status;
@@ -2656,6 +2991,12 @@ static int bluetooth_get_radiosw(void)
2656 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status) 2991 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
2657 return RFKILL_STATE_HARD_BLOCKED; 2992 return RFKILL_STATE_HARD_BLOCKED;
2658 2993
2994#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
2995 if (dbg_bluetoothemul)
2996 return (tpacpi_bluetooth_emulstate) ?
2997 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
2998#endif
2999
2659 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d")) 3000 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d"))
2660 return -EIO; 3001 return -EIO;
2661 3002
@@ -2689,12 +3030,20 @@ static int bluetooth_set_radiosw(int radio_on, int update_rfk)
2689 && radio_on) 3030 && radio_on)
2690 return -EPERM; 3031 return -EPERM;
2691 3032
2692 if (!acpi_evalf(hkey_handle, &status, "GBDC", "d")) 3033#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
2693 return -EIO; 3034 if (dbg_bluetoothemul) {
3035 tpacpi_bluetooth_emulstate = !!radio_on;
3036 if (update_rfk)
3037 bluetooth_update_rfk();
3038 return 0;
3039 }
3040#endif
3041
3042 /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
2694 if (radio_on) 3043 if (radio_on)
2695 status |= TP_ACPI_BLUETOOTH_RADIOSSW; 3044 status = TP_ACPI_BLUETOOTH_RADIOSSW;
2696 else 3045 else
2697 status &= ~TP_ACPI_BLUETOOTH_RADIOSSW; 3046 status = 0;
2698 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) 3047 if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
2699 return -EIO; 3048 return -EIO;
2700 3049
@@ -2765,8 +3114,19 @@ static int tpacpi_bluetooth_rfk_set(void *data, enum rfkill_state state)
2765 return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0); 3114 return bluetooth_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
2766} 3115}
2767 3116
3117static void bluetooth_shutdown(void)
3118{
3119 /* Order firmware to save current state to NVRAM */
3120 if (!acpi_evalf(NULL, NULL, "\\BLTH", "vd",
3121 TP_ACPI_BLTH_SAVE_STATE))
3122 printk(TPACPI_NOTICE
3123 "failed to save bluetooth state to NVRAM\n");
3124}
3125
2768static void bluetooth_exit(void) 3126static void bluetooth_exit(void)
2769{ 3127{
3128 bluetooth_shutdown();
3129
2770 if (tpacpi_bluetooth_rfkill) 3130 if (tpacpi_bluetooth_rfkill)
2771 rfkill_unregister(tpacpi_bluetooth_rfkill); 3131 rfkill_unregister(tpacpi_bluetooth_rfkill);
2772 3132
@@ -2792,6 +3152,13 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
2792 str_supported(tp_features.bluetooth), 3152 str_supported(tp_features.bluetooth),
2793 status); 3153 status);
2794 3154
3155#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3156 if (dbg_bluetoothemul) {
3157 tp_features.bluetooth = 1;
3158 printk(TPACPI_INFO
3159 "bluetooth switch emulation enabled\n");
3160 } else
3161#endif
2795 if (tp_features.bluetooth && 3162 if (tp_features.bluetooth &&
2796 !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) { 3163 !(status & TP_ACPI_BLUETOOTH_HWPRESENT)) {
2797 /* no bluetooth hardware present in system */ 3164 /* no bluetooth hardware present in system */
@@ -2812,6 +3179,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
2812 &tpacpi_bluetooth_rfkill, 3179 &tpacpi_bluetooth_rfkill,
2813 RFKILL_TYPE_BLUETOOTH, 3180 RFKILL_TYPE_BLUETOOTH,
2814 "tpacpi_bluetooth_sw", 3181 "tpacpi_bluetooth_sw",
3182 true,
2815 tpacpi_bluetooth_rfk_set, 3183 tpacpi_bluetooth_rfk_set,
2816 tpacpi_bluetooth_rfk_get); 3184 tpacpi_bluetooth_rfk_get);
2817 if (res) { 3185 if (res) {
@@ -2864,6 +3232,8 @@ static struct ibm_struct bluetooth_driver_data = {
2864 .read = bluetooth_read, 3232 .read = bluetooth_read,
2865 .write = bluetooth_write, 3233 .write = bluetooth_write,
2866 .exit = bluetooth_exit, 3234 .exit = bluetooth_exit,
3235 .suspend = bluetooth_suspend,
3236 .shutdown = bluetooth_shutdown,
2867}; 3237};
2868 3238
2869/************************************************************************* 3239/*************************************************************************
@@ -2874,11 +3244,19 @@ enum {
2874 /* ACPI GWAN/SWAN bits */ 3244 /* ACPI GWAN/SWAN bits */
2875 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */ 3245 TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
2876 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */ 3246 TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
2877 TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */ 3247 TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
3248 off / last state */
2878}; 3249};
2879 3250
2880static struct rfkill *tpacpi_wan_rfkill; 3251static struct rfkill *tpacpi_wan_rfkill;
2881 3252
3253static void wan_suspend(pm_message_t state)
3254{
3255 /* Try to make sure radio will resume powered off */
3256 acpi_evalf(NULL, NULL, "\\WGSV", "qvd",
3257 TP_ACPI_WGSV_PWR_OFF_ON_RESUME);
3258}
3259
2882static int wan_get_radiosw(void) 3260static int wan_get_radiosw(void)
2883{ 3261{
2884 int status; 3262 int status;
@@ -2890,6 +3268,12 @@ static int wan_get_radiosw(void)
2890 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status) 3268 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
2891 return RFKILL_STATE_HARD_BLOCKED; 3269 return RFKILL_STATE_HARD_BLOCKED;
2892 3270
3271#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3272 if (dbg_wwanemul)
3273 return (tpacpi_wwan_emulstate) ?
3274 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
3275#endif
3276
2893 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d")) 3277 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d"))
2894 return -EIO; 3278 return -EIO;
2895 3279
@@ -2923,12 +3307,20 @@ static int wan_set_radiosw(int radio_on, int update_rfk)
2923 && radio_on) 3307 && radio_on)
2924 return -EPERM; 3308 return -EPERM;
2925 3309
2926 if (!acpi_evalf(hkey_handle, &status, "GWAN", "d")) 3310#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
2927 return -EIO; 3311 if (dbg_wwanemul) {
3312 tpacpi_wwan_emulstate = !!radio_on;
3313 if (update_rfk)
3314 wan_update_rfk();
3315 return 0;
3316 }
3317#endif
3318
3319 /* We make sure to keep TP_ACPI_WANCARD_RESUMECTRL off */
2928 if (radio_on) 3320 if (radio_on)
2929 status |= TP_ACPI_WANCARD_RADIOSSW; 3321 status = TP_ACPI_WANCARD_RADIOSSW;
2930 else 3322 else
2931 status &= ~TP_ACPI_WANCARD_RADIOSSW; 3323 status = 0;
2932 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) 3324 if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
2933 return -EIO; 3325 return -EIO;
2934 3326
@@ -2999,8 +3391,19 @@ static int tpacpi_wan_rfk_set(void *data, enum rfkill_state state)
2999 return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0); 3391 return wan_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
3000} 3392}
3001 3393
3394static void wan_shutdown(void)
3395{
3396 /* Order firmware to save current state to NVRAM */
3397 if (!acpi_evalf(NULL, NULL, "\\WGSV", "vd",
3398 TP_ACPI_WGSV_SAVE_STATE))
3399 printk(TPACPI_NOTICE
3400 "failed to save WWAN state to NVRAM\n");
3401}
3402
3002static void wan_exit(void) 3403static void wan_exit(void)
3003{ 3404{
3405 wan_shutdown();
3406
3004 if (tpacpi_wan_rfkill) 3407 if (tpacpi_wan_rfkill)
3005 rfkill_unregister(tpacpi_wan_rfkill); 3408 rfkill_unregister(tpacpi_wan_rfkill);
3006 3409
@@ -3024,6 +3427,13 @@ static int __init wan_init(struct ibm_init_struct *iibm)
3024 str_supported(tp_features.wan), 3427 str_supported(tp_features.wan),
3025 status); 3428 status);
3026 3429
3430#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3431 if (dbg_wwanemul) {
3432 tp_features.wan = 1;
3433 printk(TPACPI_INFO
3434 "wwan switch emulation enabled\n");
3435 } else
3436#endif
3027 if (tp_features.wan && 3437 if (tp_features.wan &&
3028 !(status & TP_ACPI_WANCARD_HWPRESENT)) { 3438 !(status & TP_ACPI_WANCARD_HWPRESENT)) {
3029 /* no wan hardware present in system */ 3439 /* no wan hardware present in system */
@@ -3044,6 +3454,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
3044 &tpacpi_wan_rfkill, 3454 &tpacpi_wan_rfkill,
3045 RFKILL_TYPE_WWAN, 3455 RFKILL_TYPE_WWAN,
3046 "tpacpi_wwan_sw", 3456 "tpacpi_wwan_sw",
3457 true,
3047 tpacpi_wan_rfk_set, 3458 tpacpi_wan_rfk_set,
3048 tpacpi_wan_rfk_get); 3459 tpacpi_wan_rfk_get);
3049 if (res) { 3460 if (res) {
@@ -3096,6 +3507,164 @@ static struct ibm_struct wan_driver_data = {
3096 .read = wan_read, 3507 .read = wan_read,
3097 .write = wan_write, 3508 .write = wan_write,
3098 .exit = wan_exit, 3509 .exit = wan_exit,
3510 .suspend = wan_suspend,
3511 .shutdown = wan_shutdown,
3512};
3513
3514/*************************************************************************
3515 * UWB subdriver
3516 */
3517
3518enum {
3519 /* ACPI GUWB/SUWB bits */
3520 TP_ACPI_UWB_HWPRESENT = 0x01, /* UWB hw available */
3521 TP_ACPI_UWB_RADIOSSW = 0x02, /* UWB radio enabled */
3522};
3523
3524static struct rfkill *tpacpi_uwb_rfkill;
3525
3526static int uwb_get_radiosw(void)
3527{
3528 int status;
3529
3530 if (!tp_features.uwb)
3531 return -ENODEV;
3532
3533 /* WLSW overrides UWB in firmware/hardware, reflect that */
3534 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status)
3535 return RFKILL_STATE_HARD_BLOCKED;
3536
3537#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3538 if (dbg_uwbemul)
3539 return (tpacpi_uwb_emulstate) ?
3540 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
3541#endif
3542
3543 if (!acpi_evalf(hkey_handle, &status, "GUWB", "d"))
3544 return -EIO;
3545
3546 return ((status & TP_ACPI_UWB_RADIOSSW) != 0) ?
3547 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
3548}
3549
3550static void uwb_update_rfk(void)
3551{
3552 int status;
3553
3554 if (!tpacpi_uwb_rfkill)
3555 return;
3556
3557 status = uwb_get_radiosw();
3558 if (status < 0)
3559 return;
3560 rfkill_force_state(tpacpi_uwb_rfkill, status);
3561}
3562
3563static int uwb_set_radiosw(int radio_on, int update_rfk)
3564{
3565 int status;
3566
3567 if (!tp_features.uwb)
3568 return -ENODEV;
3569
3570 /* WLSW overrides UWB in firmware/hardware, but there is no
3571 * reason to risk weird behaviour. */
3572 if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&status) && !status
3573 && radio_on)
3574 return -EPERM;
3575
3576#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3577 if (dbg_uwbemul) {
3578 tpacpi_uwb_emulstate = !!radio_on;
3579 if (update_rfk)
3580 uwb_update_rfk();
3581 return 0;
3582 }
3583#endif
3584
3585 status = (radio_on) ? TP_ACPI_UWB_RADIOSSW : 0;
3586 if (!acpi_evalf(hkey_handle, NULL, "SUWB", "vd", status))
3587 return -EIO;
3588
3589 if (update_rfk)
3590 uwb_update_rfk();
3591
3592 return 0;
3593}
3594
3595/* --------------------------------------------------------------------- */
3596
3597static int tpacpi_uwb_rfk_get(void *data, enum rfkill_state *state)
3598{
3599 int uwbs = uwb_get_radiosw();
3600
3601 if (uwbs < 0)
3602 return uwbs;
3603
3604 *state = uwbs;
3605 return 0;
3606}
3607
3608static int tpacpi_uwb_rfk_set(void *data, enum rfkill_state state)
3609{
3610 return uwb_set_radiosw((state == RFKILL_STATE_UNBLOCKED), 0);
3611}
3612
3613static void uwb_exit(void)
3614{
3615 if (tpacpi_uwb_rfkill)
3616 rfkill_unregister(tpacpi_uwb_rfkill);
3617}
3618
3619static int __init uwb_init(struct ibm_init_struct *iibm)
3620{
3621 int res;
3622 int status = 0;
3623
3624 vdbg_printk(TPACPI_DBG_INIT, "initializing uwb subdriver\n");
3625
3626 TPACPI_ACPIHANDLE_INIT(hkey);
3627
3628 tp_features.uwb = hkey_handle &&
3629 acpi_evalf(hkey_handle, &status, "GUWB", "qd");
3630
3631 vdbg_printk(TPACPI_DBG_INIT, "uwb is %s, status 0x%02x\n",
3632 str_supported(tp_features.uwb),
3633 status);
3634
3635#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
3636 if (dbg_uwbemul) {
3637 tp_features.uwb = 1;
3638 printk(TPACPI_INFO
3639 "uwb switch emulation enabled\n");
3640 } else
3641#endif
3642 if (tp_features.uwb &&
3643 !(status & TP_ACPI_UWB_HWPRESENT)) {
3644 /* no uwb hardware present in system */
3645 tp_features.uwb = 0;
3646 dbg_printk(TPACPI_DBG_INIT,
3647 "uwb hardware not installed\n");
3648 }
3649
3650 if (!tp_features.uwb)
3651 return 1;
3652
3653 res = tpacpi_new_rfkill(TPACPI_RFK_UWB_SW_ID,
3654 &tpacpi_uwb_rfkill,
3655 RFKILL_TYPE_UWB,
3656 "tpacpi_uwb_sw",
3657 false,
3658 tpacpi_uwb_rfk_set,
3659 tpacpi_uwb_rfk_get);
3660
3661 return res;
3662}
3663
3664static struct ibm_struct uwb_driver_data = {
3665 .name = "uwb",
3666 .exit = uwb_exit,
3667 .flags.experimental = 1,
3099}; 3668};
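The new UWB subdriver reuses the rfkill wiring already in place for bluetooth and WWAN: a get callback that maps firmware state onto the pre-2.6.31 rfkill_state values, and a set callback that only ever toggles the soft block. A condensed sketch of that pairing, with hypothetical backend helpers standing in for the GUWB/SUWB ACPI evaluations:

/* hypothetical backend helpers, not part of the patch */
static int radio_is_on(void);           /* <0 on error, 0/1 otherwise */
static int radio_set(bool on);

static int example_rfk_get(void *data, enum rfkill_state *state)
{
        int on = radio_is_on();

        if (on < 0)
                return on;              /* propagate -EIO and friends */
        *state = on ? RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED;
        return 0;
}

static int example_rfk_set(void *data, enum rfkill_state state)
{
        return radio_set(state == RFKILL_STATE_UNBLOCKED);
}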
3100 3669
3101/************************************************************************* 3670/*************************************************************************
@@ -3724,7 +4293,7 @@ static void dock_notify(struct ibm_struct *ibm, u32 event)
3724 } 4293 }
3725 acpi_bus_generate_proc_event(ibm->acpi->device, event, data); 4294 acpi_bus_generate_proc_event(ibm->acpi->device, event, data);
3726 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, 4295 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
3727 ibm->acpi->device->dev.bus_id, 4296 dev_name(&ibm->acpi->device->dev),
3728 event, data); 4297 event, data);
3729} 4298}
3730 4299
@@ -3826,7 +4395,7 @@ static void bay_notify(struct ibm_struct *ibm, u32 event)
3826{ 4395{
3827 acpi_bus_generate_proc_event(ibm->acpi->device, event, 0); 4396 acpi_bus_generate_proc_event(ibm->acpi->device, event, 0);
3828 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class, 4397 acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
3829 ibm->acpi->device->dev.bus_id, 4398 dev_name(&ibm->acpi->device->dev),
3830 event, 0); 4399 event, 0);
3831} 4400}
3832 4401
@@ -4850,7 +5419,7 @@ static int brightness_set(int value)
4850 value < 0) 5419 value < 0)
4851 return -EINVAL; 5420 return -EINVAL;
4852 5421
4853 res = mutex_lock_interruptible(&brightness_mutex); 5422 res = mutex_lock_killable(&brightness_mutex);
4854 if (res < 0) 5423 if (res < 0)
4855 return res; 5424 return res;
4856 5425
@@ -5334,6 +5903,60 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
5334 ); /* all others */ 5903 ); /* all others */
5335 5904
5336/* 5905/*
 5906 * Uninitialized HFSP quirk: ACPI DSDT and EC fail to initialize the
5907 * HFSP register at boot, so it contains 0x07 but the Thinkpad could
5908 * be in auto mode (0x80).
5909 *
5910 * This is corrected by any write to HFSP either by the driver, or
5911 * by the firmware.
5912 *
5913 * We assume 0x07 really means auto mode while this quirk is active,
5914 * as this is far more likely than the ThinkPad being in level 7,
5915 * which is only used by the firmware during thermal emergencies.
5916 */
5917
5918static void fan_quirk1_detect(void)
5919{
5920 /* In some ThinkPads, neither the EC nor the ACPI
5921 * DSDT initialize the HFSP register, and it ends up
5922 * being initially set to 0x07 when it *could* be
5923 * either 0x07 or 0x80.
5924 *
5925 * Enable for TP-1Y (T43), TP-78 (R51e),
5926 * TP-76 (R52), TP-70 (T43, R52), which are known
5927 * to be buggy. */
5928 if (fan_control_initial_status == 0x07) {
5929 switch (thinkpad_id.ec_model) {
5930 case 0x5931: /* TP-1Y */
5931 case 0x3837: /* TP-78 */
5932 case 0x3637: /* TP-76 */
5933 case 0x3037: /* TP-70 */
5934 printk(TPACPI_NOTICE
5935 "fan_init: initial fan status is unknown, "
5936 "assuming it is in auto mode\n");
5937 tp_features.fan_ctrl_status_undef = 1;
5938 ;;
5939 }
5940 }
5941}
5942
5943static void fan_quirk1_handle(u8 *fan_status)
5944{
5945 if (unlikely(tp_features.fan_ctrl_status_undef)) {
5946 if (*fan_status != fan_control_initial_status) {
 5947 /* something changed the HFSP register since
5948 * driver init time, so it is not undefined
5949 * anymore */
5950 tp_features.fan_ctrl_status_undef = 0;
5951 } else {
5952 /* Return most likely status. In fact, it
5953 * might be the only possible status */
5954 *fan_status = TP_EC_FAN_AUTO;
5955 }
5956 }
5957}
5958
5959/*
5337 * Call with fan_mutex held 5960 * Call with fan_mutex held
5338 */ 5961 */
5339static void fan_update_desired_level(u8 status) 5962static void fan_update_desired_level(u8 status)
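The quirk described in the comment above behaves like a small latch: fan_quirk1_detect() decides once, at init time, whether the 0x07 reading can be trusted, and every later status read runs fan_quirk1_handle(), which either clears the flag (as soon as the register has visibly changed) or rewrites the ambiguous value as auto mode. A reduced sketch of that state machine, with hypothetical names:

static bool status_undef;       /* latched once at init time */
static u8 initial_status;

static void quirk_detect(u8 first_reading)
{
        initial_status = first_reading;
        /* 0x07 could mean either "level 7" or "never initialized" */
        if (first_reading == 0x07)
                status_undef = true;
}

static void quirk_handle(u8 *status)
{
        if (!status_undef)
                return;
        if (*status != initial_status)
                status_undef = false;   /* firmware wrote HFSP, trust it now */
        else
                *status = 0x80;         /* report the far more likely "auto" */
}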
@@ -5371,8 +5994,10 @@ static int fan_get_status(u8 *status)
5371 if (unlikely(!acpi_ec_read(fan_status_offset, &s))) 5994 if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
5372 return -EIO; 5995 return -EIO;
5373 5996
5374 if (likely(status)) 5997 if (likely(status)) {
5375 *status = s; 5998 *status = s;
5999 fan_quirk1_handle(status);
6000 }
5376 6001
5377 break; 6002 break;
5378 6003
@@ -5388,7 +6013,7 @@ static int fan_get_status_safe(u8 *status)
5388 int rc; 6013 int rc;
5389 u8 s; 6014 u8 s;
5390 6015
5391 if (mutex_lock_interruptible(&fan_mutex)) 6016 if (mutex_lock_killable(&fan_mutex))
5392 return -ERESTARTSYS; 6017 return -ERESTARTSYS;
5393 rc = fan_get_status(&s); 6018 rc = fan_get_status(&s);
5394 if (!rc) 6019 if (!rc)
@@ -5471,7 +6096,7 @@ static int fan_set_level_safe(int level)
5471 if (!fan_control_allowed) 6096 if (!fan_control_allowed)
5472 return -EPERM; 6097 return -EPERM;
5473 6098
5474 if (mutex_lock_interruptible(&fan_mutex)) 6099 if (mutex_lock_killable(&fan_mutex))
5475 return -ERESTARTSYS; 6100 return -ERESTARTSYS;
5476 6101
5477 if (level == TPACPI_FAN_LAST_LEVEL) 6102 if (level == TPACPI_FAN_LAST_LEVEL)
@@ -5493,7 +6118,7 @@ static int fan_set_enable(void)
5493 if (!fan_control_allowed) 6118 if (!fan_control_allowed)
5494 return -EPERM; 6119 return -EPERM;
5495 6120
5496 if (mutex_lock_interruptible(&fan_mutex)) 6121 if (mutex_lock_killable(&fan_mutex))
5497 return -ERESTARTSYS; 6122 return -ERESTARTSYS;
5498 6123
5499 switch (fan_control_access_mode) { 6124 switch (fan_control_access_mode) {
@@ -5548,7 +6173,7 @@ static int fan_set_disable(void)
5548 if (!fan_control_allowed) 6173 if (!fan_control_allowed)
5549 return -EPERM; 6174 return -EPERM;
5550 6175
5551 if (mutex_lock_interruptible(&fan_mutex)) 6176 if (mutex_lock_killable(&fan_mutex))
5552 return -ERESTARTSYS; 6177 return -ERESTARTSYS;
5553 6178
5554 rc = 0; 6179 rc = 0;
@@ -5586,7 +6211,7 @@ static int fan_set_speed(int speed)
5586 if (!fan_control_allowed) 6211 if (!fan_control_allowed)
5587 return -EPERM; 6212 return -EPERM;
5588 6213
5589 if (mutex_lock_interruptible(&fan_mutex)) 6214 if (mutex_lock_killable(&fan_mutex))
5590 return -ERESTARTSYS; 6215 return -ERESTARTSYS;
5591 6216
5592 rc = 0; 6217 rc = 0;
@@ -5682,16 +6307,6 @@ static ssize_t fan_pwm1_enable_show(struct device *dev,
5682 if (res) 6307 if (res)
5683 return res; 6308 return res;
5684 6309
5685 if (unlikely(tp_features.fan_ctrl_status_undef)) {
5686 if (status != fan_control_initial_status) {
5687 tp_features.fan_ctrl_status_undef = 0;
5688 } else {
5689 /* Return most likely status. In fact, it
5690 * might be the only possible status */
5691 status = TP_EC_FAN_AUTO;
5692 }
5693 }
5694
5695 if (status & TP_EC_FAN_FULLSPEED) { 6310 if (status & TP_EC_FAN_FULLSPEED) {
5696 mode = 0; 6311 mode = 0;
5697 } else if (status & TP_EC_FAN_AUTO) { 6312 } else if (status & TP_EC_FAN_AUTO) {
@@ -5756,14 +6371,6 @@ static ssize_t fan_pwm1_show(struct device *dev,
5756 if (res) 6371 if (res)
5757 return res; 6372 return res;
5758 6373
5759 if (unlikely(tp_features.fan_ctrl_status_undef)) {
5760 if (status != fan_control_initial_status) {
5761 tp_features.fan_ctrl_status_undef = 0;
5762 } else {
5763 status = TP_EC_FAN_AUTO;
5764 }
5765 }
5766
5767 if ((status & 6374 if ((status &
5768 (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0) 6375 (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) != 0)
5769 status = fan_control_desired_level; 6376 status = fan_control_desired_level;
@@ -5788,7 +6395,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
5788 /* scale down from 0-255 to 0-7 */ 6395 /* scale down from 0-255 to 0-7 */
5789 newlevel = (s >> 5) & 0x07; 6396 newlevel = (s >> 5) & 0x07;
5790 6397
5791 if (mutex_lock_interruptible(&fan_mutex)) 6398 if (mutex_lock_killable(&fan_mutex))
5792 return -ERESTARTSYS; 6399 return -ERESTARTSYS;
5793 6400
5794 rc = fan_get_status(&status); 6401 rc = fan_get_status(&status);
@@ -5895,29 +6502,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
5895 if (likely(acpi_ec_read(fan_status_offset, 6502 if (likely(acpi_ec_read(fan_status_offset,
5896 &fan_control_initial_status))) { 6503 &fan_control_initial_status))) {
5897 fan_status_access_mode = TPACPI_FAN_RD_TPEC; 6504 fan_status_access_mode = TPACPI_FAN_RD_TPEC;
5898 6505 fan_quirk1_detect();
5899 /* In some ThinkPads, neither the EC nor the ACPI
5900 * DSDT initialize the fan status, and it ends up
5901 * being set to 0x07 when it *could* be either
5902 * 0x07 or 0x80.
5903 *
5904 * Enable for TP-1Y (T43), TP-78 (R51e),
5905 * TP-76 (R52), TP-70 (T43, R52), which are known
5906 * to be buggy. */
5907 if (fan_control_initial_status == 0x07) {
5908 switch (thinkpad_id.ec_model) {
5909 case 0x5931: /* TP-1Y */
5910 case 0x3837: /* TP-78 */
5911 case 0x3637: /* TP-76 */
5912 case 0x3037: /* TP-70 */
5913 printk(TPACPI_NOTICE
5914 "fan_init: initial fan status "
5915 "is unknown, assuming it is "
5916 "in auto mode\n");
5917 tp_features.fan_ctrl_status_undef = 1;
5918 ;;
5919 }
5920 }
5921 } else { 6506 } else {
5922 printk(TPACPI_ERR 6507 printk(TPACPI_ERR
5923 "ThinkPad ACPI EC access misbehaving, " 6508 "ThinkPad ACPI EC access misbehaving, "
@@ -6106,15 +6691,6 @@ static int fan_read(char *p)
6106 if (rc < 0) 6691 if (rc < 0)
6107 return rc; 6692 return rc;
6108 6693
6109 if (unlikely(tp_features.fan_ctrl_status_undef)) {
6110 if (status != fan_control_initial_status)
6111 tp_features.fan_ctrl_status_undef = 0;
6112 else
6113 /* Return most likely status. In fact, it
6114 * might be the only possible status */
6115 status = TP_EC_FAN_AUTO;
6116 }
6117
6118 len += sprintf(p + len, "status:\t\t%s\n", 6694 len += sprintf(p + len, "status:\t\t%s\n",
6119 (status != 0) ? "enabled" : "disabled"); 6695 (status != 0) ? "enabled" : "disabled");
6120 6696
@@ -6563,6 +7139,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
6563 .init = wan_init, 7139 .init = wan_init,
6564 .data = &wan_driver_data, 7140 .data = &wan_driver_data,
6565 }, 7141 },
7142 {
7143 .init = uwb_init,
7144 .data = &uwb_driver_data,
7145 },
6566#ifdef CONFIG_THINKPAD_ACPI_VIDEO 7146#ifdef CONFIG_THINKPAD_ACPI_VIDEO
6567 { 7147 {
6568 .init = video_init, 7148 .init = video_init,
@@ -6701,6 +7281,32 @@ TPACPI_PARAM(brightness);
6701TPACPI_PARAM(volume); 7281TPACPI_PARAM(volume);
6702TPACPI_PARAM(fan); 7282TPACPI_PARAM(fan);
6703 7283
7284#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
7285module_param(dbg_wlswemul, uint, 0);
7286MODULE_PARM_DESC(dbg_wlswemul, "Enables WLSW emulation");
7287module_param_named(wlsw_state, tpacpi_wlsw_emulstate, bool, 0);
7288MODULE_PARM_DESC(wlsw_state,
7289 "Initial state of the emulated WLSW switch");
7290
7291module_param(dbg_bluetoothemul, uint, 0);
7292MODULE_PARM_DESC(dbg_bluetoothemul, "Enables bluetooth switch emulation");
7293module_param_named(bluetooth_state, tpacpi_bluetooth_emulstate, bool, 0);
7294MODULE_PARM_DESC(bluetooth_state,
7295 "Initial state of the emulated bluetooth switch");
7296
7297module_param(dbg_wwanemul, uint, 0);
7298MODULE_PARM_DESC(dbg_wwanemul, "Enables WWAN switch emulation");
7299module_param_named(wwan_state, tpacpi_wwan_emulstate, bool, 0);
7300MODULE_PARM_DESC(wwan_state,
7301 "Initial state of the emulated WWAN switch");
7302
7303module_param(dbg_uwbemul, uint, 0);
7304MODULE_PARM_DESC(dbg_uwbemul, "Enables UWB switch emulation");
7305module_param_named(uwb_state, tpacpi_uwb_emulstate, bool, 0);
7306MODULE_PARM_DESC(uwb_state,
7307 "Initial state of the emulated UWB switch");
7308#endif
7309
6704static void thinkpad_acpi_module_exit(void) 7310static void thinkpad_acpi_module_exit(void)
6705{ 7311{
6706 struct ibm_struct *ibm, *itmp; 7312 struct ibm_struct *ibm, *itmp;
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index d30bb766fcef..b56a704409d2 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -20,7 +20,7 @@
20 20
21static inline unsigned int get_irq_flags(struct resource *res) 21static inline unsigned int get_irq_flags(struct resource *res)
22{ 22{
23 unsigned int flags = IRQF_DISABLED | IRQF_SHARED; 23 unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
24 24
25 flags |= res->flags & IRQF_TRIGGER_MASK; 25 flags |= res->flags & IRQF_TRIGGER_MASK;
26 26
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index 204158cf7a55..fe96793e3f08 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -732,7 +732,7 @@ static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group)
732 case 8: 732 case 8:
733 return pm_translate_signal_group_number_on_island8(subgroup); 733 return pm_translate_signal_group_number_on_island8(subgroup);
734 default: 734 default:
735 dev_dbg(sbd_core(), "%s:%u: island not found: %lu\n", __func__, 735 dev_dbg(sbd_core(), "%s:%u: island not found: %llu\n", __func__,
736 __LINE__, group); 736 __LINE__, group);
737 BUG(); 737 BUG();
738 break; 738 break;
@@ -765,7 +765,7 @@ static int __ps3_set_signal(u64 lv1_signal_group, u64 bus_select,
765 signal_select, attr1, attr2, attr3); 765 signal_select, attr1, attr2, attr3);
766 if (ret) 766 if (ret)
767 dev_err(sbd_core(), 767 dev_err(sbd_core(),
768 "%s:%u: error:%d 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", 768 "%s:%u: error:%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
769 __func__, __LINE__, ret, lv1_signal_group, bus_select, 769 __func__, __LINE__, ret, lv1_signal_group, bus_select,
770 signal_select, attr1, attr2, attr3); 770 signal_select, attr1, attr2, attr3);
771 771
@@ -908,7 +908,7 @@ void ps3_disable_pm(u32 cpu)
908 908
909 lpm_priv->tb_count = tmp; 909 lpm_priv->tb_count = tmp;
910 910
911 dev_dbg(sbd_core(), "%s:%u: tb_count %lu (%lxh)\n", __func__, __LINE__, 911 dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__, __LINE__,
912 lpm_priv->tb_count, lpm_priv->tb_count); 912 lpm_priv->tb_count, lpm_priv->tb_count);
913} 913}
914EXPORT_SYMBOL_GPL(ps3_disable_pm); 914EXPORT_SYMBOL_GPL(ps3_disable_pm);
@@ -938,7 +938,7 @@ int ps3_lpm_copy_tb(unsigned long offset, void *buf, unsigned long count,
938 if (offset >= lpm_priv->tb_count) 938 if (offset >= lpm_priv->tb_count)
939 return 0; 939 return 0;
940 940
941 count = min(count, lpm_priv->tb_count - offset); 941 count = min_t(u64, count, lpm_priv->tb_count - offset);
942 942
943 while (*bytes_copied < count) { 943 while (*bytes_copied < count) {
944 const unsigned long request = count - *bytes_copied; 944 const unsigned long request = count - *bytes_copied;
@@ -993,7 +993,7 @@ int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
993 if (offset >= lpm_priv->tb_count) 993 if (offset >= lpm_priv->tb_count)
994 return 0; 994 return 0;
995 995
996 count = min(count, lpm_priv->tb_count - offset); 996 count = min_t(u64, count, lpm_priv->tb_count - offset);
997 997
998 while (*bytes_copied < count) { 998 while (*bytes_copied < count) {
999 const unsigned long request = count - *bytes_copied; 999 const unsigned long request = count - *bytes_copied;
@@ -1013,7 +1013,7 @@ int ps3_lpm_copy_tb_to_user(unsigned long offset, void __user *buf,
1013 result = copy_to_user(buf, lpm_priv->tb_cache, tmp); 1013 result = copy_to_user(buf, lpm_priv->tb_cache, tmp);
1014 1014
1015 if (result) { 1015 if (result) {
1016 dev_dbg(sbd_core(), "%s:%u: 0x%lx bytes at 0x%p\n", 1016 dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n",
1017 __func__, __LINE__, tmp, buf); 1017 __func__, __LINE__, tmp, buf);
1018 dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n", 1018 dev_err(sbd_core(), "%s:%u: copy_to_user failed: %d\n",
1019 __func__, __LINE__, result); 1019 __func__, __LINE__, result);
@@ -1148,8 +1148,8 @@ int ps3_lpm_open(enum ps3_lpm_tb_type tb_type, void *tb_cache,
1148 lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT; 1148 lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
1149 lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT; 1149 lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
1150 1150
1151 dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%lx, outlet_id 0x%lx, " 1151 dev_dbg(sbd_core(), "%s:%u: lpm_id 0x%llx, outlet_id 0x%llx, "
1152 "tb_size 0x%lx\n", __func__, __LINE__, lpm_priv->lpm_id, 1152 "tb_size 0x%llx\n", __func__, __LINE__, lpm_priv->lpm_id,
1153 lpm_priv->outlet_id, tb_size); 1153 lpm_priv->outlet_id, tb_size);
1154 1154
1155 return 0; 1155 return 0;
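These ps3 format-string updates, together with the ps3-vuart and ps3stor hunks that follow, track the powerpc change of u64 to unsigned long long: %lu/%lx no longer match the type, %llu/%llx do. A hedged sketch of the two usual ways to print such values:

static void example_log_count(u64 tb_count)
{
        /* u64 now expands to unsigned long long on powerpc as well, so
         * %llu/%llx match it directly */
        pr_debug("tb_count %llu (%llxh)\n", tb_count, tb_count);

        /* for types whose width still varies by configuration (sector_t,
         * resource_size_t), an explicit cast keeps the format portable */
        pr_debug("tb_count %llu\n", (unsigned long long)tb_count);
}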
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 90c097a7a47a..e4ad5ba5d0a3 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -114,7 +114,7 @@ struct ports_bmp {
114static void __maybe_unused _dump_ports_bmp( 114static void __maybe_unused _dump_ports_bmp(
115 const struct ports_bmp *bmp, const char *func, int line) 115 const struct ports_bmp *bmp, const char *func, int line)
116{ 116{
117 pr_debug("%s:%d: ports_bmp: %016lxh\n", func, line, bmp->status); 117 pr_debug("%s:%d: ports_bmp: %016llxh\n", func, line, bmp->status);
118} 118}
119 119
120#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__) 120#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
@@ -159,11 +159,13 @@ int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
159 struct vuart_triggers *trig) 159 struct vuart_triggers *trig)
160{ 160{
161 int result; 161 int result;
162 unsigned long size; 162 u64 size;
163 unsigned long val; 163 u64 val;
164 u64 tx;
164 165
165 result = lv1_get_virtual_uart_param(dev->port_number, 166 result = lv1_get_virtual_uart_param(dev->port_number,
166 PARAM_TX_TRIGGER, &trig->tx); 167 PARAM_TX_TRIGGER, &tx);
168 trig->tx = tx;
167 169
168 if (result) { 170 if (result) {
169 dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n", 171 dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
@@ -201,7 +203,7 @@ int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
201 unsigned int rx) 203 unsigned int rx)
202{ 204{
203 int result; 205 int result;
204 unsigned long size; 206 u64 size;
205 207
206 result = lv1_set_virtual_uart_param(dev->port_number, 208 result = lv1_set_virtual_uart_param(dev->port_number,
207 PARAM_TX_TRIGGER, tx); 209 PARAM_TX_TRIGGER, tx);
@@ -248,7 +250,7 @@ static int ps3_vuart_get_rx_bytes_waiting(struct ps3_system_bus_device *dev,
248 dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n", 250 dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
249 __func__, __LINE__, ps3_result(result)); 251 __func__, __LINE__, ps3_result(result));
250 252
251 dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, 253 dev_dbg(&dev->core, "%s:%d: %llxh\n", __func__, __LINE__,
252 *bytes_waiting); 254 *bytes_waiting);
253 return result; 255 return result;
254} 256}
@@ -295,7 +297,7 @@ static int ps3_vuart_get_interrupt_status(struct ps3_system_bus_device *dev,
295 297
296 *status = tmp & priv->interrupt_mask; 298 *status = tmp & priv->interrupt_mask;
297 299
298 dev_dbg(&dev->core, "%s:%d: m %lxh, s %lxh, m&s %lxh\n", 300 dev_dbg(&dev->core, "%s:%d: m %llxh, s %llxh, m&s %lxh\n",
299 __func__, __LINE__, priv->interrupt_mask, tmp, *status); 301 __func__, __LINE__, priv->interrupt_mask, tmp, *status);
300 302
301 return result; 303 return result;
@@ -363,7 +365,7 @@ int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
363 */ 365 */
364 366
365static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev, 367static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
366 const void *buf, unsigned int bytes, unsigned long *bytes_written) 368 const void *buf, unsigned int bytes, u64 *bytes_written)
367{ 369{
368 int result; 370 int result;
369 struct ps3_vuart_port_priv *priv = to_port_priv(dev); 371 struct ps3_vuart_port_priv *priv = to_port_priv(dev);
@@ -379,7 +381,7 @@ static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
379 381
380 priv->stats.bytes_written += *bytes_written; 382 priv->stats.bytes_written += *bytes_written;
381 383
382 dev_dbg(&dev->core, "%s:%d: wrote %lxh/%xh=>%lxh\n", __func__, __LINE__, 384 dev_dbg(&dev->core, "%s:%d: wrote %llxh/%xh=>%lxh\n", __func__, __LINE__,
383 *bytes_written, bytes, priv->stats.bytes_written); 385 *bytes_written, bytes, priv->stats.bytes_written);
384 386
385 return result; 387 return result;
@@ -393,7 +395,7 @@ static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
393 */ 395 */
394 396
395static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf, 397static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
396 unsigned int bytes, unsigned long *bytes_read) 398 unsigned int bytes, u64 *bytes_read)
397{ 399{
398 int result; 400 int result;
399 struct ps3_vuart_port_priv *priv = to_port_priv(dev); 401 struct ps3_vuart_port_priv *priv = to_port_priv(dev);
@@ -411,7 +413,7 @@ static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
411 413
412 priv->stats.bytes_read += *bytes_read; 414 priv->stats.bytes_read += *bytes_read;
413 415
414 dev_dbg(&dev->core, "%s:%d: read %lxh/%xh=>%lxh\n", __func__, __LINE__, 416 dev_dbg(&dev->core, "%s:%d: read %llxh/%xh=>%lxh\n", __func__, __LINE__,
415 *bytes_read, bytes, priv->stats.bytes_read); 417 *bytes_read, bytes, priv->stats.bytes_read);
416 418
417 return result; 419 return result;
@@ -500,7 +502,7 @@ int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
500 spin_lock_irqsave(&priv->tx_list.lock, flags); 502 spin_lock_irqsave(&priv->tx_list.lock, flags);
501 503
502 if (list_empty(&priv->tx_list.head)) { 504 if (list_empty(&priv->tx_list.head)) {
503 unsigned long bytes_written; 505 u64 bytes_written;
504 506
505 result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written); 507 result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
506 508
@@ -592,7 +594,7 @@ static int ps3_vuart_queue_rx_bytes(struct ps3_system_bus_device *dev,
592 list_add_tail(&lb->link, &priv->rx_list.head); 594 list_add_tail(&lb->link, &priv->rx_list.head);
593 priv->rx_list.bytes_held += bytes; 595 priv->rx_list.bytes_held += bytes;
594 596
595 dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %lxh bytes\n", 597 dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %llxh bytes\n",
596 __func__, __LINE__, lb->dbg_number, bytes); 598 __func__, __LINE__, lb->dbg_number, bytes);
597 599
598 *bytes_queued = bytes; 600 *bytes_queued = bytes;
@@ -745,7 +747,7 @@ static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
745 747
746 list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) { 748 list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
747 749
748 unsigned long bytes_written; 750 u64 bytes_written;
749 751
750 result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head, 752 result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
751 &bytes_written); 753 &bytes_written);
@@ -762,7 +764,7 @@ static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
762 if (bytes_written < lb->tail - lb->head) { 764 if (bytes_written < lb->tail - lb->head) {
763 lb->head += bytes_written; 765 lb->head += bytes_written;
764 dev_dbg(&dev->core, 766 dev_dbg(&dev->core,
765 "%s:%d cleared buf_%lu, %lxh bytes\n", 767 "%s:%d cleared buf_%lu, %llxh bytes\n",
766 __func__, __LINE__, lb->dbg_number, 768 __func__, __LINE__, lb->dbg_number,
767 bytes_written); 769 bytes_written);
768 goto port_full; 770 goto port_full;
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index 55955f16ad91..18066d555397 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -70,7 +70,7 @@ static int ps3stor_probe_access(struct ps3_storage_device *dev)
70 __func__, __LINE__, n); 70 __func__, __LINE__, n);
71 dev->region_idx = __ffs(dev->accessible_regions); 71 dev->region_idx = __ffs(dev->accessible_regions);
72 dev_info(&dev->sbd.core, 72 dev_info(&dev->sbd.core,
73 "First accessible region has index %u start %lu size %lu\n", 73 "First accessible region has index %u start %llu size %llu\n",
74 dev->region_idx, dev->regions[dev->region_idx].start, 74 dev->region_idx, dev->regions[dev->region_idx].start,
75 dev->regions[dev->region_idx].size); 75 dev->regions[dev->region_idx].size);
76 76
@@ -220,7 +220,7 @@ u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
220 const char *op = write ? "write" : "read"; 220 const char *op = write ? "write" : "read";
221 int res; 221 int res;
222 222
223 dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n", 223 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
224 __func__, __LINE__, op, sectors, start_sector); 224 __func__, __LINE__, op, sectors, start_sector);
225 225
226 init_completion(&dev->done); 226 init_completion(&dev->done);
@@ -238,7 +238,7 @@ u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
238 238
239 wait_for_completion(&dev->done); 239 wait_for_completion(&dev->done);
240 if (dev->lv1_status) { 240 if (dev->lv1_status) {
241 dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__, 241 dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
242 __LINE__, op, dev->lv1_status); 242 __LINE__, op, dev->lv1_status);
243 return dev->lv1_status; 243 return dev->lv1_status;
244 } 244 }
@@ -268,7 +268,7 @@ u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd, u64 arg1,
268{ 268{
269 int res; 269 int res;
270 270
271 dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%lx\n", __func__, 271 dev_dbg(&dev->sbd.core, "%s:%u: send device command 0x%llx\n", __func__,
272 __LINE__, cmd); 272 __LINE__, cmd);
273 273
274 init_completion(&dev->done); 274 init_completion(&dev->done);
@@ -277,19 +277,19 @@ u64 ps3stor_send_command(struct ps3_storage_device *dev, u64 cmd, u64 arg1,
277 arg2, arg3, arg4, &dev->tag); 277 arg2, arg3, arg4, &dev->tag);
278 if (res) { 278 if (res) {
279 dev_err(&dev->sbd.core, 279 dev_err(&dev->sbd.core,
280 "%s:%u: send_device_command 0x%lx failed %d\n", 280 "%s:%u: send_device_command 0x%llx failed %d\n",
281 __func__, __LINE__, cmd, res); 281 __func__, __LINE__, cmd, res);
282 return -1; 282 return -1;
283 } 283 }
284 284
285 wait_for_completion(&dev->done); 285 wait_for_completion(&dev->done);
286 if (dev->lv1_status) { 286 if (dev->lv1_status) {
287 dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx failed 0x%lx\n", 287 dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx failed 0x%llx\n",
288 __func__, __LINE__, cmd, dev->lv1_status); 288 __func__, __LINE__, cmd, dev->lv1_status);
289 return dev->lv1_status; 289 return dev->lv1_status;
290 } 290 }
291 291
292 dev_dbg(&dev->sbd.core, "%s:%u: command 0x%lx completed\n", __func__, 292 dev_dbg(&dev->sbd.core, "%s:%u: command 0x%llx completed\n", __func__,
293 __LINE__, cmd); 293 __LINE__, cmd);
294 294
295 return 0; 295 return 0;
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 48b372e038a8..56e23d44ba59 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -289,7 +289,7 @@ static struct regulator_desc regulators[] = {
289 }, 289 },
290}; 290};
291 291
292static int __init wm8400_regulator_probe(struct platform_device *pdev) 292static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
293{ 293{
294 struct regulator_dev *rdev; 294 struct regulator_dev *rdev;
295 295
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index cc7eb8767b82..bd56a033bfd0 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -27,6 +27,8 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/io.h> 28#include <linux/io.h>
29 29
30#include <mach/hardware.h>
31
30#define TIMER_FREQ CLOCK_TICK_RATE 32#define TIMER_FREQ CLOCK_TICK_RATE
31#define RTC_DEF_DIVIDER (32768 - 1) 33#define RTC_DEF_DIVIDER (32768 - 1)
32#define RTC_DEF_TRIM 0 34#define RTC_DEF_TRIM 0
diff --git a/drivers/rtc/rtc-twl4030.c b/drivers/rtc/rtc-twl4030.c
index 8ce5f74ee45b..ad35f76c46b7 100644
--- a/drivers/rtc/rtc-twl4030.c
+++ b/drivers/rtc/rtc-twl4030.c
@@ -120,7 +120,7 @@ static int twl4030_rtc_write_u8(u8 data, u8 reg)
120static unsigned char rtc_irq_bits; 120static unsigned char rtc_irq_bits;
121 121
122/* 122/*
123 * Enable timer and/or alarm interrupts. 123 * Enable 1/second update and/or alarm interrupts.
124 */ 124 */
125static int set_rtc_irq_bit(unsigned char bit) 125static int set_rtc_irq_bit(unsigned char bit)
126{ 126{
@@ -128,6 +128,7 @@ static int set_rtc_irq_bit(unsigned char bit)
128 int ret; 128 int ret;
129 129
130 val = rtc_irq_bits | bit; 130 val = rtc_irq_bits | bit;
131 val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
131 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG); 132 ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
132 if (ret == 0) 133 if (ret == 0)
133 rtc_irq_bits = val; 134 rtc_irq_bits = val;
@@ -136,7 +137,7 @@ static int set_rtc_irq_bit(unsigned char bit)
136} 137}
137 138
138/* 139/*
139 * Disable timer and/or alarm interrupts. 140 * Disable update and/or alarm interrupts.
140 */ 141 */
141static int mask_rtc_irq_bit(unsigned char bit) 142static int mask_rtc_irq_bit(unsigned char bit)
142{ 143{
@@ -151,7 +152,7 @@ static int mask_rtc_irq_bit(unsigned char bit)
151 return ret; 152 return ret;
152} 153}
153 154
154static inline int twl4030_rtc_alarm_irq_set_state(int enabled) 155static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
155{ 156{
156 int ret; 157 int ret;
157 158
@@ -163,7 +164,7 @@ static inline int twl4030_rtc_alarm_irq_set_state(int enabled)
163 return ret; 164 return ret;
164} 165}
165 166
166static inline int twl4030_rtc_irq_set_state(int enabled) 167static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
167{ 168{
168 int ret; 169 int ret;
169 170
@@ -292,7 +293,7 @@ static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
292 unsigned char alarm_data[ALL_TIME_REGS + 1]; 293 unsigned char alarm_data[ALL_TIME_REGS + 1];
293 int ret; 294 int ret;
294 295
295 ret = twl4030_rtc_alarm_irq_set_state(0); 296 ret = twl4030_rtc_alarm_irq_enable(dev, 0);
296 if (ret) 297 if (ret)
297 goto out; 298 goto out;
298 299
@@ -312,35 +313,11 @@ static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
312 } 313 }
313 314
314 if (alm->enabled) 315 if (alm->enabled)
315 ret = twl4030_rtc_alarm_irq_set_state(1); 316 ret = twl4030_rtc_alarm_irq_enable(dev, 1);
316out: 317out:
317 return ret; 318 return ret;
318} 319}
319 320
320#ifdef CONFIG_RTC_INTF_DEV
321
322static int twl4030_rtc_ioctl(struct device *dev, unsigned int cmd,
323 unsigned long arg)
324{
325 switch (cmd) {
326 case RTC_AIE_OFF:
327 return twl4030_rtc_alarm_irq_set_state(0);
328 case RTC_AIE_ON:
329 return twl4030_rtc_alarm_irq_set_state(1);
330 case RTC_UIE_OFF:
331 return twl4030_rtc_irq_set_state(0);
332 case RTC_UIE_ON:
333 return twl4030_rtc_irq_set_state(1);
334
335 default:
336 return -ENOIOCTLCMD;
337 }
338}
339
340#else
341#define twl4030_rtc_ioctl NULL
342#endif
343
344static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc) 321static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
345{ 322{
346 unsigned long events = 0; 323 unsigned long events = 0;
@@ -400,11 +377,12 @@ out:
400} 377}
401 378
402static struct rtc_class_ops twl4030_rtc_ops = { 379static struct rtc_class_ops twl4030_rtc_ops = {
403 .ioctl = twl4030_rtc_ioctl,
404 .read_time = twl4030_rtc_read_time, 380 .read_time = twl4030_rtc_read_time,
405 .set_time = twl4030_rtc_set_time, 381 .set_time = twl4030_rtc_set_time,
406 .read_alarm = twl4030_rtc_read_alarm, 382 .read_alarm = twl4030_rtc_read_alarm,
407 .set_alarm = twl4030_rtc_set_alarm, 383 .set_alarm = twl4030_rtc_set_alarm,
384 .alarm_irq_enable = twl4030_rtc_alarm_irq_enable,
385 .update_irq_enable = twl4030_rtc_update_irq_enable,
408}; 386};
409 387
410/*----------------------------------------------------------------------*/ 388/*----------------------------------------------------------------------*/
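With this change the twl4030 driver stops decoding RTC_AIE_*/RTC_UIE_* ioctls itself and instead exposes the rtc core's dedicated alarm_irq_enable/update_irq_enable callbacks, letting the core route those ioctls. The shape of the resulting ops table and one callback, sketched with hypothetical chip helpers in place of the twl4030 register accessors:

/* hypothetical chip helpers, standing in for the real register writes */
static int example_hw_alarm_irq(unsigned int enable);
static int example_read_time(struct device *dev, struct rtc_time *tm);
static int example_set_time(struct device *dev, struct rtc_time *tm);
static int example_read_alarm(struct device *dev, struct rtc_wkalrm *alm);
static int example_set_alarm(struct device *dev, struct rtc_wkalrm *alm);

static int example_alarm_irq_enable(struct device *dev, unsigned enabled)
{
        /* the rtc core calls this for RTC_AIE_ON/RTC_AIE_OFF, so the
         * driver no longer needs its own .ioctl handler */
        return example_hw_alarm_irq(enabled);
}

static struct rtc_class_ops example_rtc_ops = {
        .read_time        = example_read_time,
        .set_time         = example_set_time,
        .read_alarm       = example_read_alarm,
        .set_alarm        = example_set_alarm,
        .alarm_irq_enable = example_alarm_irq_enable,
};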
@@ -422,7 +400,7 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
422 rtc = rtc_device_register(pdev->name, 400 rtc = rtc_device_register(pdev->name,
423 &pdev->dev, &twl4030_rtc_ops, THIS_MODULE); 401 &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
424 if (IS_ERR(rtc)) { 402 if (IS_ERR(rtc)) {
425 ret = -EINVAL; 403 ret = PTR_ERR(rtc);
426 dev_err(&pdev->dev, "can't register RTC device, err %ld\n", 404 dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
427 PTR_ERR(rtc)); 405 PTR_ERR(rtc));
428 goto out0; 406 goto out0;
@@ -432,7 +410,6 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
432 platform_set_drvdata(pdev, rtc); 410 platform_set_drvdata(pdev, rtc);
433 411
434 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG); 412 ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
435
436 if (ret < 0) 413 if (ret < 0)
437 goto out1; 414 goto out1;
438 415
@@ -475,7 +452,6 @@ static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
475 452
476 return ret; 453 return ret;
477 454
478
479out2: 455out2:
480 free_irq(irq, rtc); 456 free_irq(irq, rtc);
481out1: 457out1:
@@ -506,8 +482,9 @@ static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
506 482
507static void twl4030_rtc_shutdown(struct platform_device *pdev) 483static void twl4030_rtc_shutdown(struct platform_device *pdev)
508{ 484{
509 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M | 485 /* mask timer interrupts, but leave alarm interrupts on to enable
510 BIT_RTC_INTERRUPTS_REG_IT_ALARM_M); 486 power-on when alarm is triggered */
487 mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
511} 488}
512 489
513#ifdef CONFIG_PM 490#ifdef CONFIG_PM
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index acca6678cb2b..49c3bfa1afd7 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -70,7 +70,9 @@ static char debug_buffer[255];
70static void lcs_tasklet(unsigned long); 70static void lcs_tasklet(unsigned long);
71static void lcs_start_kernel_thread(struct work_struct *); 71static void lcs_start_kernel_thread(struct work_struct *);
72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); 72static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
73#ifdef CONFIG_IP_MULTICAST
73static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); 74static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
75#endif /* CONFIG_IP_MULTICAST */
74static int lcs_recovery(void *ptr); 76static int lcs_recovery(void *ptr);
75 77
76/** 78/**
@@ -1285,6 +1287,8 @@ out:
1285 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); 1287 lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
1286 return 0; 1288 return 0;
1287} 1289}
1290#endif /* CONFIG_IP_MULTICAST */
1291
1288/** 1292/**
1289 * function called by net device to 1293 * function called by net device to
1290 * handle multicast address relevant things 1294 * handle multicast address relevant things
@@ -1292,6 +1296,7 @@ out:
1292static void 1296static void
1293lcs_set_multicast_list(struct net_device *dev) 1297lcs_set_multicast_list(struct net_device *dev)
1294{ 1298{
1299#ifdef CONFIG_IP_MULTICAST
1295 struct lcs_card *card; 1300 struct lcs_card *card;
1296 1301
1297 LCS_DBF_TEXT(4, trace, "setmulti"); 1302 LCS_DBF_TEXT(4, trace, "setmulti");
@@ -1299,9 +1304,8 @@ lcs_set_multicast_list(struct net_device *dev)
1299 1304
1300 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) 1305 if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
1301 schedule_work(&card->kernel_thread_starter); 1306 schedule_work(&card->kernel_thread_starter);
1302}
1303
1304#endif /* CONFIG_IP_MULTICAST */ 1307#endif /* CONFIG_IP_MULTICAST */
1308}
1305 1309
1306static long 1310static long
1307lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) 1311lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ee0739b217b6..91ef669d98f6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -933,7 +933,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
933 fc_host_speed(shost) = FC_PORTSPEED_16GBIT; 933 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
934 break; 934 break;
935 default: 935 default:
936 ibmvfc_log(vhost, 3, "Unknown port speed: %ld Gbit\n", 936 ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
937 vhost->login_buf->resp.link_speed / 100); 937 vhost->login_buf->resp.link_speed / 100);
938 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 938 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
939 break; 939 break;
@@ -2149,8 +2149,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2149{ 2149{
2150 const char *desc = ibmvfc_get_ae_desc(crq->event); 2150 const char *desc = ibmvfc_get_ae_desc(crq->event);
2151 2151
2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %lx, wwpn: %lx," 2152 ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
2153 " node_name: %lx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); 2153 " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
2154 2154
2155 switch (crq->event) { 2155 switch (crq->event) {
2156 case IBMVFC_AE_LINK_UP: 2156 case IBMVFC_AE_LINK_UP:
@@ -2184,7 +2184,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2184 ibmvfc_link_down(vhost, IBMVFC_HALTED); 2184 ibmvfc_link_down(vhost, IBMVFC_HALTED);
2185 break; 2185 break;
2186 default: 2186 default:
2187 dev_err(vhost->dev, "Unknown async event received: %ld\n", crq->event); 2187 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
2188 break; 2188 break;
2189 }; 2189 };
2190} 2190}
@@ -2261,13 +2261,13 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2261 * actually sent 2261 * actually sent
2262 */ 2262 */
2263 if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) { 2263 if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2264 dev_err(vhost->dev, "Returned correlation_token 0x%08lx is invalid!\n", 2264 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
2265 crq->ioba); 2265 crq->ioba);
2266 return; 2266 return;
2267 } 2267 }
2268 2268
2269 if (unlikely(atomic_read(&evt->free))) { 2269 if (unlikely(atomic_read(&evt->free))) {
2270 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08lx!\n", 2270 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
2271 crq->ioba); 2271 crq->ioba);
2272 return; 2272 return;
2273 } 2273 }
@@ -3259,7 +3259,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
3259 3259
3260 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); 3260 tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
3261 if (!tgt) { 3261 if (!tgt) {
3262 dev_err(vhost->dev, "Target allocation failure for scsi id %08lx\n", 3262 dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
3263 scsi_id); 3263 scsi_id);
3264 return -ENOMEM; 3264 return -ENOMEM;
3265 } 3265 }
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index babdf3db59df..87dafd0f8d44 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -691,13 +691,13 @@ struct ibmvfc_host {
691#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0) 691#define DBG_CMD(CMD) do { if (ibmvfc_debug) CMD; } while (0)
692 692
693#define tgt_dbg(t, fmt, ...) \ 693#define tgt_dbg(t, fmt, ...) \
694 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) 694 DBG_CMD(dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
695 695
696#define tgt_info(t, fmt, ...) \ 696#define tgt_info(t, fmt, ...) \
697 dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 697 dev_info((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
698 698
699#define tgt_err(t, fmt, ...) \ 699#define tgt_err(t, fmt, ...) \
700 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 700 dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
701 701
702#define ibmvfc_dbg(vhost, ...) \ 702#define ibmvfc_dbg(vhost, ...) \
703 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) 703 DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 841f460edbc4..07829009a8be 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4912,7 +4912,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4912 if (res && ipr_is_gata(res)) { 4912 if (res && ipr_is_gata(res)) {
4913 if (cmd == HDIO_GET_IDENTITY) 4913 if (cmd == HDIO_GET_IDENTITY)
4914 return -ENOTTY; 4914 return -ENOTTY;
4915 return ata_scsi_ioctl(sdev, cmd, arg); 4915 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
4916 } 4916 }
4917 4917
4918 return -EINVAL; 4918 return -EINVAL;
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index a745f91d2928..e7705d3532c9 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -177,7 +177,6 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
177 struct iscsi_segment *segment, int recv, 177 struct iscsi_segment *segment, int recv,
178 unsigned copied) 178 unsigned copied)
179{ 179{
180 static unsigned char padbuf[ISCSI_PAD_LEN];
181 struct scatterlist sg; 180 struct scatterlist sg;
182 unsigned int pad; 181 unsigned int pad;
183 182
@@ -233,7 +232,7 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
233 debug_tcp("consume %d pad bytes\n", pad); 232 debug_tcp("consume %d pad bytes\n", pad);
234 segment->total_size += pad; 233 segment->total_size += pad;
235 segment->size = pad; 234 segment->size = pad;
236 segment->data = padbuf; 235 segment->data = segment->padbuf;
237 return 0; 236 return 0;
238 } 237 }
239 } 238 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 744838780ada..1c558d3bce18 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -717,7 +717,7 @@ int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
717 struct domain_device *dev = sdev_to_domain_dev(sdev); 717 struct domain_device *dev = sdev_to_domain_dev(sdev);
718 718
719 if (dev_is_sata(dev)) 719 if (dev_is_sata(dev))
720 return ata_scsi_ioctl(sdev, cmd, arg); 720 return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
721 721
722 return -EINVAL; 722 return -EINVAL;
723} 723}
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index ce48e2d0193c..ca0dd33497ec 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -290,11 +290,11 @@ static irqreturn_t ps3rom_interrupt(int irq, void *data)
290 290
291 if (tag != dev->tag) 291 if (tag != dev->tag)
292 dev_err(&dev->sbd.core, 292 dev_err(&dev->sbd.core,
293 "%s:%u: tag mismatch, got %lx, expected %lx\n", 293 "%s:%u: tag mismatch, got %llx, expected %llx\n",
294 __func__, __LINE__, tag, dev->tag); 294 __func__, __LINE__, tag, dev->tag);
295 295
296 if (res) { 296 if (res) {
297 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n", 297 dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%llx\n",
298 __func__, __LINE__, res, status); 298 __func__, __LINE__, res, status);
299 return IRQ_HANDLED; 299 return IRQ_HANDLED;
300 } 300 }
@@ -364,7 +364,7 @@ static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
364 364
365 if (dev->blk_size != CD_FRAMESIZE) { 365 if (dev->blk_size != CD_FRAMESIZE) {
366 dev_err(&dev->sbd.core, 366 dev_err(&dev->sbd.core,
367 "%s:%u: cannot handle block size %lu\n", __func__, 367 "%s:%u: cannot handle block size %llu\n", __func__,
368 __LINE__, dev->blk_size); 368 __LINE__, dev->blk_size);
369 return -EINVAL; 369 return -EINVAL;
370 } 370 }
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 2d4f32b4df5c..9ad4d0968e5c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1258,35 +1258,48 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1258{ 1258{
1259 int rval; 1259 int rval;
1260 unsigned long flags = 0; 1260 unsigned long flags = 0;
1261 int cnt; 1261 int cnt, que;
1262 struct qla_hw_data *ha = vha->hw; 1262 struct qla_hw_data *ha = vha->hw;
1263 struct req_que *req = ha->req_q_map[0]; 1263 struct req_que *req;
1264 struct rsp_que *rsp = ha->rsp_q_map[0]; 1264 struct rsp_que *rsp;
1265 struct scsi_qla_host *vp;
1265 struct mid_init_cb_24xx *mid_init_cb = 1266 struct mid_init_cb_24xx *mid_init_cb =
1266 (struct mid_init_cb_24xx *) ha->init_cb; 1267 (struct mid_init_cb_24xx *) ha->init_cb;
1267 1268
1268 spin_lock_irqsave(&ha->hardware_lock, flags); 1269 spin_lock_irqsave(&ha->hardware_lock, flags);
1269 1270
1270 /* Clear outstanding commands array. */ 1271 /* Clear outstanding commands array. */
1271 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) 1272 for (que = 0; que < ha->max_queues; que++) {
1272 req->outstanding_cmds[cnt] = NULL; 1273 req = ha->req_q_map[que];
1274 if (!req)
1275 continue;
1276 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
1277 req->outstanding_cmds[cnt] = NULL;
1273 1278
1274 req->current_outstanding_cmd = 0; 1279 req->current_outstanding_cmd = 0;
1275 1280
1276 /* Clear RSCN queue. */ 1281 /* Initialize firmware. */
1277 vha->rscn_in_ptr = 0; 1282 req->ring_ptr = req->ring;
1278 vha->rscn_out_ptr = 0; 1283 req->ring_index = 0;
1284 req->cnt = req->length;
1285 }
1279 1286
1280 /* Initialize firmware. */ 1287 for (que = 0; que < ha->max_queues; que++) {
1281 req->ring_ptr = req->ring; 1288 rsp = ha->rsp_q_map[que];
1282 req->ring_index = 0; 1289 if (!rsp)
1283 req->cnt = req->length; 1290 continue;
1284 rsp->ring_ptr = rsp->ring; 1291 rsp->ring_ptr = rsp->ring;
1285 rsp->ring_index = 0; 1292 rsp->ring_index = 0;
1286 1293
1287 /* Initialize response queue entries */ 1294 /* Initialize response queue entries */
1288 qla2x00_init_response_q_entries(rsp); 1295 qla2x00_init_response_q_entries(rsp);
1296 }
1289 1297
1298 /* Clear RSCN queue. */
1299 list_for_each_entry(vp, &ha->vp_list, list) {
1300 vp->rscn_in_ptr = 0;
1301 vp->rscn_out_ptr = 0;
1302 }
1290 ha->isp_ops->config_rings(vha); 1303 ha->isp_ops->config_rings(vha);
1291 1304
1292 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1305 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3212,8 +3225,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
3212 int rval = QLA_SUCCESS; 3225 int rval = QLA_SUCCESS;
3213 uint32_t wait_time; 3226 uint32_t wait_time;
3214 struct qla_hw_data *ha = vha->hw; 3227 struct qla_hw_data *ha = vha->hw;
3215 struct req_que *req = ha->req_q_map[0]; 3228 struct req_que *req = ha->req_q_map[vha->req_ques[0]];
3216 struct rsp_que *rsp = ha->rsp_q_map[0]; 3229 struct rsp_que *rsp = req->rsp;
3217 3230
3218 atomic_set(&vha->loop_state, LOOP_UPDATE); 3231 atomic_set(&vha->loop_state, LOOP_UPDATE);
3219 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3232 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3492,6 +3505,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3492 } 3505 }
3493 req = ha->req_q_map[i]; 3506 req = ha->req_q_map[i];
3494 if (req) { 3507 if (req) {
3508 /* Clear outstanding commands array. */
3495 req->options &= ~BIT_0; 3509 req->options &= ~BIT_0;
3496 ret = qla25xx_init_req_que(base_vha, req, req->options); 3510 ret = qla25xx_init_req_que(base_vha, req, req->options);
3497 if (ret != QLA_SUCCESS) 3511 if (ret != QLA_SUCCESS)
@@ -3500,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
3500 req->id)); 3514 req->id));
3501 else 3515 else
3502 DEBUG2_17(printk(KERN_WARNING 3516 DEBUG2_17(printk(KERN_WARNING
3503 "%s Rsp que:%d inited\n", __func__, 3517 "%s Req que:%d inited\n", __func__,
3504 req->id)); 3518 req->id));
3505 } 3519 }
3506 } 3520 }
@@ -4151,8 +4165,8 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
4151 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4165 uint16_t mb[MAILBOX_REGISTER_COUNT];
4152 struct qla_hw_data *ha = vha->hw; 4166 struct qla_hw_data *ha = vha->hw;
4153 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4167 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4154 struct req_que *req = ha->req_q_map[0]; 4168 struct req_que *req = ha->req_q_map[vha->req_ques[0]];
4155 struct rsp_que *rsp = ha->rsp_q_map[0]; 4169 struct rsp_que *rsp = req->rsp;
4156 4170
4157 if (!vha->vp_idx) 4171 if (!vha->vp_idx)
4158 return -EINVAL; 4172 return -EINVAL;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 886323130fcc..f53179c46423 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -629,6 +629,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
629 req->ring_index = 0; 629 req->ring_index = 0;
630 req->cnt = req->length; 630 req->cnt = req->length;
631 req->id = que_id; 631 req->id = que_id;
632 req->max_q_depth = ha->req_q_map[0]->max_q_depth;
632 mutex_unlock(&ha->vport_lock); 633 mutex_unlock(&ha->vport_lock);
633 634
634 ret = qla25xx_init_req_que(base_vha, req, options); 635 ret = qla25xx_init_req_que(base_vha, req, options);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 4a71f522f925..cf32653fe01a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1158,8 +1158,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1158 struct req_que *req; 1158 struct req_que *req;
1159 1159
1160 spin_lock_irqsave(&ha->hardware_lock, flags); 1160 spin_lock_irqsave(&ha->hardware_lock, flags);
1161 for (que = 0; que < QLA_MAX_HOST_QUES; que++) { 1161 for (que = 0; que < ha->max_queues; que++) {
1162 req = ha->req_q_map[vha->req_ques[que]]; 1162 req = ha->req_q_map[que];
1163 if (!req) 1163 if (!req)
1164 continue; 1164 continue;
1165 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1165 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
@@ -1193,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1193 scsi_qla_host_t *vha = shost_priv(sdev->host); 1193 scsi_qla_host_t *vha = shost_priv(sdev->host);
1194 struct qla_hw_data *ha = vha->hw; 1194 struct qla_hw_data *ha = vha->hw;
1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target); 1195 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1196 struct req_que *req = ha->req_q_map[0]; 1196 struct req_que *req = ha->req_q_map[vha->req_ques[0]];
1197 1197
1198 if (sdev->tagged_supported) 1198 if (sdev->tagged_supported)
1199 scsi_activate_tcq(sdev, req->max_q_depth); 1199 scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1998,7 +1998,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1998 return 0; 1998 return 0;
1999 1999
2000probe_failed: 2000probe_failed:
2001 qla2x00_free_que(ha, req, rsp);
2002 qla2x00_free_device(base_vha); 2001 qla2x00_free_device(base_vha);
2003 2002
2004 scsi_host_put(base_vha->host); 2003 scsi_host_put(base_vha->host);
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 42e72a2c1f98..cbcd3f681b62 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(__starget_for_each_device);
1095 * Description: Looks up the scsi_device with the specified @lun for a given 1095 * Description: Looks up the scsi_device with the specified @lun for a given
1096 * @starget. The returned scsi_device does not have an additional 1096 * @starget. The returned scsi_device does not have an additional
1097 * reference. You must hold the host's host_lock over this call and 1097 * reference. You must hold the host's host_lock over this call and
1098 * any access to the returned scsi_device. 1098 * any access to the returned scsi_device. A scsi_device in state
1099 * SDEV_DEL is skipped.
1099 * 1100 *
1100 * Note: The only reason why drivers should use this is because 1101 * Note: The only reason why drivers should use this is because
1101 * they need to access the device list in irq context. Otherwise you 1102 * they need to access the device list in irq context. Otherwise you
@@ -1107,6 +1108,8 @@ struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
1107 struct scsi_device *sdev; 1108 struct scsi_device *sdev;
1108 1109
1109 list_for_each_entry(sdev, &starget->devices, same_target_siblings) { 1110 list_for_each_entry(sdev, &starget->devices, same_target_siblings) {
1111 if (sdev->sdev_state == SDEV_DEL)
1112 continue;
1110 if (sdev->lun ==lun) 1113 if (sdev->lun ==lun)
1111 return sdev; 1114 return sdev;
1112 } 1115 }
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 4969e4ec75ea..099b5455bbce 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -224,6 +224,7 @@ static struct {
224 {"SGI", "TP9100", "*", BLIST_REPORTLUN2}, 224 {"SGI", "TP9100", "*", BLIST_REPORTLUN2},
225 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 225 {"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
226 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 226 {"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
227 {"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
227 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 228 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
228 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 229 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
229 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 230 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index c088146b7513..2a3671233b15 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -602,6 +602,10 @@ static int pci_netmos_init(struct pci_dev *dev)
602 /* subdevice 0x00PS means <P> parallel, <S> serial */ 602 /* subdevice 0x00PS means <P> parallel, <S> serial */
603 unsigned int num_serial = dev->subsystem_device & 0xf; 603 unsigned int num_serial = dev->subsystem_device & 0xf;
604 604
605 if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
606 dev->subsystem_device == 0x0299)
607 return 0;
608
605 if (num_serial == 0) 609 if (num_serial == 0)
606 return -ENODEV; 610 return -ENODEV;
607 return num_serial; 611 return num_serial;
@@ -3096,6 +3100,10 @@ static struct pci_device_id serial_pci_tbl[] = {
3096 0, 3100 0,
3097 pbn_b0_8_115200 }, 3101 pbn_b0_8_115200 },
3098 3102
3103 { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835,
3104 PCI_VENDOR_ID_IBM, 0x0299,
3105 0, 0, pbn_b0_bt_2_115200 },
3106
3099 /* 3107 /*
3100 * These entries match devices with class COMMUNICATION_SERIAL, 3108 * These entries match devices with class COMMUNICATION_SERIAL,
3101 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL 3109 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index fde7f9ccf57e..bbcfc26a3b6d 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -270,6 +270,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
270 { "RSS0250", 0 }, 270 { "RSS0250", 0 },
271 /* SupraExpress 28.8 Data/Fax PnP modem */ 271 /* SupraExpress 28.8 Data/Fax PnP modem */
272 { "SUP1310", 0 }, 272 { "SUP1310", 0 },
273 /* SupraExpress 336i PnP Voice Modem */
274 { "SUP1381", 0 },
273 /* SupraExpress 33.6 Data/Fax PnP modem */ 275 /* SupraExpress 33.6 Data/Fax PnP modem */
274 { "SUP1421", 0 }, 276 { "SUP1421", 0 },
275 /* SupraExpress 33.6 Data/Fax PnP modem */ 277 /* SupraExpress 33.6 Data/Fax PnP modem */
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index d5efd6c77904..89362d733d62 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -579,7 +579,7 @@ static void atmel_tx_dma(struct uart_port *port)
579 /* disable PDC transmit */ 579 /* disable PDC transmit */
580 UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); 580 UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
581 581
582 if (!uart_circ_empty(xmit)) { 582 if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
583 dma_sync_single_for_device(port->dev, 583 dma_sync_single_for_device(port->dev,
584 pdc->dma_addr, 584 pdc->dma_addr,
585 pdc->dma_size, 585 pdc->dma_size,
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index a821e3a3d664..14f8fa9135be 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -163,6 +163,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
163 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, 163 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, },
164 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, 164 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, },
165 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, 165 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, },
166 { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, },
166#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL 167#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
167 { .type = "serial", .compatible = "ibm,qpace-nwp-serial", 168 { .type = "serial", .compatible = "ibm,qpace-nwp-serial",
168 .data = (void *)PORT_NWPSERIAL, }, 169 .data = (void *)PORT_NWPSERIAL, },
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/serial/pnx8xxx_uart.c
index 22e30d21225e..1bb8f1b45767 100644
--- a/drivers/serial/pnx8xxx_uart.c
+++ b/drivers/serial/pnx8xxx_uart.c
@@ -187,7 +187,7 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
187 status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) | 187 status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
188 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT)); 188 ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
189 while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) { 189 while (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFIFO)) {
190 ch = serial_in(sport, PNX8XXX_FIFO); 190 ch = serial_in(sport, PNX8XXX_FIFO) & 0xff;
191 191
192 sport->port.icount.rx++; 192 sport->port.icount.rx++;
193 193
@@ -198,9 +198,16 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
198 * out of the main execution path 198 * out of the main execution path
199 */ 199 */
200 if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE | 200 if (status & (FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE |
201 PNX8XXX_UART_FIFO_RXPAR) | 201 PNX8XXX_UART_FIFO_RXPAR |
202 PNX8XXX_UART_FIFO_RXBRK) |
202 ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) { 203 ISTAT_TO_SM(PNX8XXX_UART_INT_RXOVRN))) {
203 if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR)) 204 if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXBRK)) {
205 status &= ~(FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
206 FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR));
207 sport->port.icount.brk++;
208 if (uart_handle_break(&sport->port))
209 goto ignore_char;
210 } else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR))
204 sport->port.icount.parity++; 211 sport->port.icount.parity++;
205 else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE)) 212 else if (status & FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE))
206 sport->port.icount.frame++; 213 sport->port.icount.frame++;
@@ -284,14 +291,8 @@ static irqreturn_t pnx8xxx_int(int irq, void *dev_id)
284 /* Get the interrupts */ 291 /* Get the interrupts */
285 status = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN); 292 status = serial_in(sport, PNX8XXX_ISTAT) & serial_in(sport, PNX8XXX_IEN);
286 293
287 /* Break signal received */ 294 /* Byte or break signal received */
288 if (status & PNX8XXX_UART_INT_BREAK) { 295 if (status & (PNX8XXX_UART_INT_RX | PNX8XXX_UART_INT_BREAK))
289 sport->port.icount.brk++;
290 uart_handle_break(&sport->port);
291 }
292
293 /* Byte received */
294 if (status & PNX8XXX_UART_INT_RX)
295 pnx8xxx_rx_chars(sport); 296 pnx8xxx_rx_chars(sport);
296 297
297 /* TX holding register empty - transmit a byte */ 298 /* TX holding register empty - transmit a byte */
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h
index 38c600c0dbbf..3599828b9766 100644
--- a/drivers/serial/sh-sci.h
+++ b/drivers/serial/sh-sci.h
@@ -32,7 +32,9 @@
32#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ 32#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \
33 defined(CONFIG_CPU_SUBTYPE_SH7721) 33 defined(CONFIG_CPU_SUBTYPE_SH7721)
34# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */ 34# define SCSCR_INIT(port) 0x0030 /* TIE=0,RIE=0,TE=1,RE=1 */
35#define SCIF_ORER 0x0200 /* overrun error bit */ 35# define PORT_PTCR 0xA405011EUL
36# define PORT_PVCR 0xA4050122UL
37# define SCIF_ORER 0x0200 /* overrun error bit */
36#elif defined(CONFIG_SH_RTS7751R2D) 38#elif defined(CONFIG_SH_RTS7751R2D)
37# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ 39# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */
38# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ 40# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */
@@ -393,6 +395,7 @@ SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16)
393SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) 395SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8)
394SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) 396SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16)
395SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) 397SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8)
398SCIx_FNS(SCSPTR, 0, 0, 0, 0)
396SCIF_FNS(SCTDSR, 0x0c, 8) 399SCIF_FNS(SCTDSR, 0x0c, 8)
397SCIF_FNS(SCFER, 0x10, 16) 400SCIF_FNS(SCFER, 0x10, 16)
398SCIF_FNS(SCFCR, 0x18, 16) 401SCIF_FNS(SCFCR, 0x18, 16)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4a6fe01831a8..83a185d52961 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -230,17 +230,6 @@ config SPI_XILINX
230# 230#
231comment "SPI Protocol Masters" 231comment "SPI Protocol Masters"
232 232
233config SPI_AT25
234 tristate "SPI EEPROMs from most vendors"
235 depends on SYSFS
236 help
237 Enable this driver to get read/write support to most SPI EEPROMs,
238 after you configure the board init code to know about each eeprom
239 on your target board.
240
241 This driver can also be built as a module. If so, the module
242 will be called at25.
243
244config SPI_SPIDEV 233config SPI_SPIDEV
245 tristate "User mode SPI device driver support" 234 tristate "User mode SPI device driver support"
246 depends on EXPERIMENTAL 235 depends on EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5e9f521b8844..5d0451936d86 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
33# ... add above this line ... 33# ... add above this line ...
34 34
35# SPI protocol drivers (device/link on bus) 35# SPI protocol drivers (device/link on bus)
36obj-$(CONFIG_SPI_AT25) += at25.o
37obj-$(CONFIG_SPI_SPIDEV) += spidev.o 36obj-$(CONFIG_SPI_SPIDEV) += spidev.o
38obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o 37obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
39# ... add above this line ... 38# ... add above this line ...
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 5e39bac9c51b..56ff3e6864ea 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -670,8 +670,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
670 dev_dbg(controller, "new message %p submitted for %s\n", 670 dev_dbg(controller, "new message %p submitted for %s\n",
671 msg, spi->dev.bus_id); 671 msg, spi->dev.bus_id);
672 672
673 if (unlikely(list_empty(&msg->transfers) 673 if (unlikely(list_empty(&msg->transfers)))
674 || !spi->max_speed_hz))
675 return -EINVAL; 674 return -EINVAL;
676 675
677 if (as->stopping) 676 if (as->stopping)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d5d0e40b1e2d..94d5ee263c20 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1554,7 +1554,7 @@ static int usb_configure_device_otg(struct usb_device *udev)
1554 * (Includes HNP test device.) 1554 * (Includes HNP test device.)
1555 */ 1555 */
1556 if (udev->bus->b_hnp_enable || udev->bus->is_b_host) { 1556 if (udev->bus->b_hnp_enable || udev->bus->is_b_host) {
1557 err = usb_port_suspend(udev); 1557 err = usb_port_suspend(udev, PMSG_SUSPEND);
1558 if (err < 0) 1558 if (err < 0)
1559 dev_dbg(&udev->dev, "HNP fail, %d\n", err); 1559 dev_dbg(&udev->dev, "HNP fail, %d\n", err);
1560 } 1560 }
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ef6cfa5a447f..c70a8f667d85 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2030,7 +2030,7 @@ static void ftdi_process_read(struct work_struct *work)
2030 spin_unlock_irqrestore(&priv->rx_lock, flags); 2030 spin_unlock_irqrestore(&priv->rx_lock, flags);
2031 dbg("%s - deferring remainder until unthrottled", 2031 dbg("%s - deferring remainder until unthrottled",
2032 __func__); 2032 __func__);
2033 return; 2033 goto out;
2034 } 2034 }
2035 spin_unlock_irqrestore(&priv->rx_lock, flags); 2035 spin_unlock_irqrestore(&priv->rx_lock, flags);
2036 /* if the port is closed stop trying to read */ 2036 /* if the port is closed stop trying to read */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6372f8b17b45..c94f71980c1b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2123,6 +2123,18 @@ config FB_PRE_INIT_FB
2123 Select this option if display contents should be inherited as set by 2123 Select this option if display contents should be inherited as set by
2124 the bootloader. 2124 the bootloader.
2125 2125
2126config FB_MX3
2127 tristate "MX3 Framebuffer support"
2128 depends on FB && MX3_IPU
2129 select FB_CFB_FILLRECT
2130 select FB_CFB_COPYAREA
2131 select FB_CFB_IMAGEBLIT
2132 default y
2133 help
2134 This is a framebuffer device for the i.MX31 LCD Controller. So
2135 far only synchronous displays are supported. If you plan to use
2136 an LCD display with your i.MX31 system, say Y here.
2137
2126source "drivers/video/omap/Kconfig" 2138source "drivers/video/omap/Kconfig"
2127 2139
2128source "drivers/video/backlight/Kconfig" 2140source "drivers/video/backlight/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index be2b657546ef..2a998ca6181d 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -132,6 +132,7 @@ obj-$(CONFIG_FB_VGA16) += vga16fb.o
132obj-$(CONFIG_FB_OF) += offb.o 132obj-$(CONFIG_FB_OF) += offb.o
133obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o 133obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
134obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o 134obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
135obj-$(CONFIG_FB_MX3) += mx3fb.o
135 136
136# the test framebuffer is last 137# the test framebuffer is last
137obj-$(CONFIG_FB_VIRTUAL) += vfb.o 138obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/aty/radeon_i2c.c b/drivers/video/aty/radeon_i2c.c
index 2c5567175dca..359fc64e761a 100644
--- a/drivers/video/aty/radeon_i2c.c
+++ b/drivers/video/aty/radeon_i2c.c
@@ -72,7 +72,6 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
72 snprintf(chan->adapter.name, sizeof(chan->adapter.name), 72 snprintf(chan->adapter.name, sizeof(chan->adapter.name),
73 "radeonfb %s", name); 73 "radeonfb %s", name);
74 chan->adapter.owner = THIS_MODULE; 74 chan->adapter.owner = THIS_MODULE;
75 chan->adapter.id = I2C_HW_B_RADEON;
76 chan->adapter.algo_data = &chan->algo; 75 chan->adapter.algo_data = &chan->algo;
77 chan->adapter.dev.parent = &chan->rinfo->pdev->dev; 76 chan->adapter.dev.parent = &chan->rinfo->pdev->dev;
78 chan->algo.setsda = radeon_gpio_setsda; 77 chan->algo.setsda = radeon_gpio_setsda;
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 7644ed249564..37e60b1d2ed9 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -335,7 +335,20 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
335 struct fb_info *info) 335 struct fb_info *info)
336{ 336{
337 337
338 if (var->bits_per_pixel != LCD_BPP) { 338 switch (var->bits_per_pixel) {
339 case 24:/* TRUECOLOUR, 16m */
340 var->red.offset = 16;
341 var->green.offset = 8;
342 var->blue.offset = 0;
343 var->red.length = var->green.length = var->blue.length = 8;
344 var->transp.offset = 0;
345 var->transp.length = 0;
346 var->transp.msb_right = 0;
347 var->red.msb_right = 0;
348 var->green.msb_right = 0;
349 var->blue.msb_right = 0;
350 break;
351 default:
339 pr_debug("%s: depth not supported: %u BPP\n", __func__, 352 pr_debug("%s: depth not supported: %u BPP\n", __func__,
340 var->bits_per_pixel); 353 var->bits_per_pixel);
341 return -EINVAL; 354 return -EINVAL;
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index a9b3ada05d99..2a423d3a2a8e 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -254,7 +254,20 @@ static int bfin_t350mcqb_fb_check_var(struct fb_var_screeninfo *var,
254 struct fb_info *info) 254 struct fb_info *info)
255{ 255{
256 256
257 if (var->bits_per_pixel != LCD_BPP) { 257 switch (var->bits_per_pixel) {
258 case 24:/* TRUECOLOUR, 16m */
259 var->red.offset = 0;
260 var->green.offset = 8;
261 var->blue.offset = 16;
262 var->red.length = var->green.length = var->blue.length = 8;
263 var->transp.offset = 0;
264 var->transp.length = 0;
265 var->transp.msb_right = 0;
266 var->red.msb_right = 0;
267 var->green.msb_right = 0;
268 var->blue.msb_right = 0;
269 break;
270 default:
258 pr_debug("%s: depth not supported: %u BPP\n", __func__, 271 pr_debug("%s: depth not supported: %u BPP\n", __func__,
259 var->bits_per_pixel); 272 var->bits_per_pixel);
260 return -EINVAL; 273 return -EINVAL;
diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c
index 7787c3322ffb..9dd55e5324a1 100644
--- a/drivers/video/i810/i810-i2c.c
+++ b/drivers/video/i810/i810-i2c.c
@@ -90,7 +90,6 @@ static int i810_setup_i2c_bus(struct i810fb_i2c_chan *chan, const char *name)
90 chan->adapter.owner = THIS_MODULE; 90 chan->adapter.owner = THIS_MODULE;
91 chan->adapter.algo_data = &chan->algo; 91 chan->adapter.algo_data = &chan->algo;
92 chan->adapter.dev.parent = &chan->par->dev->dev; 92 chan->adapter.dev.parent = &chan->par->dev->dev;
93 chan->adapter.id = I2C_HW_B_I810;
94 chan->algo.setsda = i810i2c_setsda; 93 chan->algo.setsda = i810i2c_setsda;
95 chan->algo.setscl = i810i2c_setscl; 94 chan->algo.setscl = i810i2c_setscl;
96 chan->algo.getsda = i810i2c_getsda; 95 chan->algo.getsda = i810i2c_getsda;
diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c
index 5d896b81f4e0..b3065492bb20 100644
--- a/drivers/video/intelfb/intelfb_i2c.c
+++ b/drivers/video/intelfb/intelfb_i2c.c
@@ -111,7 +111,6 @@ static int intelfb_setup_i2c_bus(struct intelfb_info *dinfo,
111 "intelfb %s", name); 111 "intelfb %s", name);
112 chan->adapter.class = class; 112 chan->adapter.class = class;
113 chan->adapter.owner = THIS_MODULE; 113 chan->adapter.owner = THIS_MODULE;
114 chan->adapter.id = I2C_HW_B_INTELFB;
115 chan->adapter.algo_data = &chan->algo; 114 chan->adapter.algo_data = &chan->algo;
116 chan->adapter.dev.parent = &chan->dinfo->pdev->dev; 115 chan->adapter.dev.parent = &chan->dinfo->pdev->dev;
117 chan->algo.setsda = intelfb_gpio_setsda; 116 chan->algo.setsda = intelfb_gpio_setsda;
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
new file mode 100644
index 000000000000..8a75d05f4334
--- /dev/null
+++ b/drivers/video/mx3fb.c
@@ -0,0 +1,1555 @@
1/*
2 * Copyright (C) 2008
3 * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
4 *
5 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/platform_device.h>
15#include <linux/sched.h>
16#include <linux/errno.h>
17#include <linux/string.h>
18#include <linux/interrupt.h>
19#include <linux/slab.h>
20#include <linux/fb.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24#include <linux/dma-mapping.h>
25#include <linux/dmaengine.h>
26#include <linux/console.h>
27#include <linux/clk.h>
28#include <linux/mutex.h>
29
30#include <mach/hardware.h>
31#include <mach/ipu.h>
32#include <mach/mx3fb.h>
33
34#include <asm/io.h>
35#include <asm/uaccess.h>
36
37#define MX3FB_NAME "mx3_sdc_fb"
38
39#define MX3FB_REG_OFFSET 0xB4
40
41/* SDC Registers */
42#define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET)
43#define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET)
44#define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET)
45#define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET)
46#define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET)
47#define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET)
48#define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET)
49#define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET)
50#define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET)
51#define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET)
52#define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET)
53
54/* Register bits */
55#define SDC_COM_TFT_COLOR 0x00000001UL
56#define SDC_COM_FG_EN 0x00000010UL
57#define SDC_COM_GWSEL 0x00000020UL
58#define SDC_COM_GLB_A 0x00000040UL
59#define SDC_COM_KEY_COLOR_G 0x00000080UL
60#define SDC_COM_BG_EN 0x00000200UL
61#define SDC_COM_SHARP 0x00001000UL
62
63#define SDC_V_SYNC_WIDTH_L 0x00000001UL
64
65/* Display Interface registers */
66#define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET)
67#define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET)
68#define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET)
69#define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET)
70#define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET)
71#define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET)
72#define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET)
73#define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET)
74#define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET)
75#define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET)
76#define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET)
77#define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET)
78#define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET)
79#define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET)
80#define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET)
81#define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET)
82#define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET)
83#define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET)
84#define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET)
85#define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET)
86#define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET)
87#define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET)
88#define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET)
89#define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET)
90#define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET)
91#define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET)
92#define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET)
93#define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET)
94#define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET)
95#define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET)
96#define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET)
97#define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET)
98#define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET)
99#define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET)
100#define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET)
101#define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET)
102#define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET)
103#define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET)
104#define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET)
105
106/* DI_DISP_SIG_POL bits */
107#define DI_D3_VSYNC_POL_SHIFT 28
108#define DI_D3_HSYNC_POL_SHIFT 27
109#define DI_D3_DRDY_SHARP_POL_SHIFT 26
110#define DI_D3_CLK_POL_SHIFT 25
111#define DI_D3_DATA_POL_SHIFT 24
112
113/* DI_DISP_IF_CONF bits */
114#define DI_D3_CLK_IDLE_SHIFT 26
115#define DI_D3_CLK_SEL_SHIFT 25
116#define DI_D3_DATAMSK_SHIFT 24
117
118enum ipu_panel {
119 IPU_PANEL_SHARP_TFT,
120 IPU_PANEL_TFT,
121};
122
123struct ipu_di_signal_cfg {
124 unsigned datamask_en:1;
125 unsigned clksel_en:1;
126 unsigned clkidle_en:1;
127 unsigned data_pol:1; /* true = inverted */
128 unsigned clk_pol:1; /* true = rising edge */
129 unsigned enable_pol:1;
130 unsigned Hsync_pol:1; /* true = active high */
131 unsigned Vsync_pol:1;
132};
133
134static const struct fb_videomode mx3fb_modedb[] = {
135 {
136 /* 240x320 @ 60 Hz */
137 .name = "Sharp-QVGA",
138 .refresh = 60,
139 .xres = 240,
140 .yres = 320,
141 .pixclock = 185925,
142 .left_margin = 9,
143 .right_margin = 16,
144 .upper_margin = 7,
145 .lower_margin = 9,
146 .hsync_len = 1,
147 .vsync_len = 1,
148 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
149 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
150 FB_SYNC_CLK_IDLE_EN,
151 .vmode = FB_VMODE_NONINTERLACED,
152 .flag = 0,
153 }, {
154 /* 240x33 @ 60 Hz */
155 .name = "Sharp-CLI",
156 .refresh = 60,
157 .xres = 240,
158 .yres = 33,
159 .pixclock = 185925,
160 .left_margin = 9,
161 .right_margin = 16,
162 .upper_margin = 7,
163 .lower_margin = 9 + 287,
164 .hsync_len = 1,
165 .vsync_len = 1,
166 .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
167 FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
168 FB_SYNC_CLK_IDLE_EN,
169 .vmode = FB_VMODE_NONINTERLACED,
170 .flag = 0,
171 }, {
172 /* 640x480 @ 60 Hz */
173 .name = "NEC-VGA",
174 .refresh = 60,
175 .xres = 640,
176 .yres = 480,
177 .pixclock = 38255,
178 .left_margin = 144,
179 .right_margin = 0,
180 .upper_margin = 34,
181 .lower_margin = 40,
182 .hsync_len = 1,
183 .vsync_len = 1,
184 .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
185 .vmode = FB_VMODE_NONINTERLACED,
186 .flag = 0,
187 }, {
188 /* NTSC TV output */
189 .name = "TV-NTSC",
190 .refresh = 60,
191 .xres = 640,
192 .yres = 480,
193 .pixclock = 37538,
194 .left_margin = 38,
195 .right_margin = 858 - 640 - 38 - 3,
196 .upper_margin = 36,
197 .lower_margin = 518 - 480 - 36 - 1,
198 .hsync_len = 3,
199 .vsync_len = 1,
200 .sync = 0,
201 .vmode = FB_VMODE_NONINTERLACED,
202 .flag = 0,
203 }, {
204 /* PAL TV output */
205 .name = "TV-PAL",
206 .refresh = 50,
207 .xres = 640,
208 .yres = 480,
209 .pixclock = 37538,
210 .left_margin = 38,
211 .right_margin = 960 - 640 - 38 - 32,
212 .upper_margin = 32,
213 .lower_margin = 555 - 480 - 32 - 3,
214 .hsync_len = 32,
215 .vsync_len = 3,
216 .sync = 0,
217 .vmode = FB_VMODE_NONINTERLACED,
218 .flag = 0,
219 }, {
220 /* TV output VGA mode, 640x480 @ 65 Hz */
221 .name = "TV-VGA",
222 .refresh = 60,
223 .xres = 640,
224 .yres = 480,
225 .pixclock = 40574,
226 .left_margin = 35,
227 .right_margin = 45,
228 .upper_margin = 9,
229 .lower_margin = 1,
230 .hsync_len = 46,
231 .vsync_len = 5,
232 .sync = 0,
233 .vmode = FB_VMODE_NONINTERLACED,
234 .flag = 0,
235 },
236};
237
238struct mx3fb_data {
239 struct fb_info *fbi;
240 int backlight_level;
241 void __iomem *reg_base;
242 spinlock_t lock;
243 struct device *dev;
244
245 uint32_t h_start_width;
246 uint32_t v_start_width;
247};
248
249struct dma_chan_request {
250 struct mx3fb_data *mx3fb;
251 enum ipu_channel id;
252};
253
254/* MX3 specific framebuffer information. */
255struct mx3fb_info {
256 int blank;
257 enum ipu_channel ipu_ch;
258 uint32_t cur_ipu_buf;
259
260 u32 pseudo_palette[16];
261
262 struct completion flip_cmpl;
263 struct mutex mutex; /* Protects fb-ops */
264 struct mx3fb_data *mx3fb;
265 struct idmac_channel *idmac_channel;
266 struct dma_async_tx_descriptor *txd;
267 dma_cookie_t cookie;
268 struct scatterlist sg[2];
269
270 u32 sync; /* preserve var->sync flags */
271};
272
273static void mx3fb_dma_done(void *);
274
275/* Used fb-mode and bpp. Can be set on kernel command line, therefore file-static. */
276static const char *fb_mode;
277static unsigned long default_bpp = 16;
278
279static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg)
280{
281 return __raw_readl(mx3fb->reg_base + reg);
282}
283
284static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg)
285{
286 __raw_writel(value, mx3fb->reg_base + reg);
287}
288
289static const uint32_t di_mappings[] = {
290 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
291 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
292 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
293 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
294};
295
296static void sdc_fb_init(struct mx3fb_info *fbi)
297{
298 struct mx3fb_data *mx3fb = fbi->mx3fb;
299 uint32_t reg;
300
301 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
302
303 mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF);
304}
305
306/* Returns enabled flag before uninit */
307static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi)
308{
309 struct mx3fb_data *mx3fb = fbi->mx3fb;
310 uint32_t reg;
311
312 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
313
314 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF);
315
316 return reg & SDC_COM_BG_EN;
317}
318
319static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
320{
321 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
322 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
323 struct dma_chan *dma_chan = &ichan->dma_chan;
324 unsigned long flags;
325 dma_cookie_t cookie;
326
327 dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
328 to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
329
330 /* This enables the channel */
331 if (mx3_fbi->cookie < 0) {
332 mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
333 &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
334 if (!mx3_fbi->txd) {
335 dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
336 dma_chan->chan_id);
337 return;
338 }
339
340 mx3_fbi->txd->callback_param = mx3_fbi->txd;
341 mx3_fbi->txd->callback = mx3fb_dma_done;
342
343 cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd);
344 dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__,
345 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
346 } else {
347 if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) {
348 dev_err(mx3fb->dev, "Cannot enable channel %d\n",
349 dma_chan->chan_id);
350 return;
351 }
352
353 /* Just re-activate the same buffer */
354 dma_async_issue_pending(dma_chan);
355 cookie = mx3_fbi->cookie;
356 dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__,
357 mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
358 }
359
360 if (cookie >= 0) {
361 spin_lock_irqsave(&mx3fb->lock, flags);
362 sdc_fb_init(mx3_fbi);
363 mx3_fbi->cookie = cookie;
364 spin_unlock_irqrestore(&mx3fb->lock, flags);
365 }
366
367 /*
368 * Attention! Without this msleep the channel keeps generating
369 * interrupts. Next sdc_set_brightness() is going to be called
370 * from mx3fb_blank().
371 */
372 msleep(2);
373}
374
375static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
376{
377 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
378 uint32_t enabled;
379 unsigned long flags;
380
381 spin_lock_irqsave(&mx3fb->lock, flags);
382
383 enabled = sdc_fb_uninit(mx3_fbi);
384
385 spin_unlock_irqrestore(&mx3fb->lock, flags);
386
387 mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
388 mx3_fbi->txd = NULL;
389 mx3_fbi->cookie = -EINVAL;
390}
391
392/**
393 * sdc_set_window_pos() - set window position of the respective plane.
394 * @mx3fb: mx3fb context.
395 * @channel: IPU DMAC channel ID.
396 * @x_pos: X coordinate relative to the top left corner to place window at.
397 * @y_pos: Y coordinate relative to the top left corner to place window at.
398 * @return: 0 on success or negative error code on failure.
399 */
400static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel,
401 int16_t x_pos, int16_t y_pos)
402{
403 x_pos += mx3fb->h_start_width;
404 y_pos += mx3fb->v_start_width;
405
406 if (channel != IDMAC_SDC_0)
407 return -EINVAL;
408
409 mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS);
410 return 0;
411}
412
413/**
414 * sdc_init_panel() - initialize a synchronous LCD panel.
415 * @mx3fb: mx3fb context.
416 * @panel: panel type.
417 * @pixel_clk: desired pixel clock frequency in Hz.
418 * @width: width of panel in pixels.
419 * @height: height of panel in pixels.
420 * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
421 * @h_start_width: number of pixel clocks between the HSYNC signal pulse
422 * and the start of valid data.
423 * @h_sync_width: width of the HSYNC signal in units of pixel clocks.
424 * @h_end_width: number of pixel clocks between the end of valid data
425 * and the HSYNC signal for next line.
426 * @v_start_width: number of lines between the VSYNC signal pulse and the
427 * start of valid data.
428 * @v_sync_width: width of the VSYNC signal in units of lines
429 * @v_end_width: number of lines between the end of valid data and the
430 * VSYNC signal for next frame.
431 * @sig: bitfield of signal polarities for LCD interface.
432 * @return: 0 on success or negative error code on failure.
433 */
434static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
435 uint32_t pixel_clk,
436 uint16_t width, uint16_t height,
437 enum pixel_fmt pixel_fmt,
438 uint16_t h_start_width, uint16_t h_sync_width,
439 uint16_t h_end_width, uint16_t v_start_width,
440 uint16_t v_sync_width, uint16_t v_end_width,
441 struct ipu_di_signal_cfg sig)
442{
443 unsigned long lock_flags;
444 uint32_t reg;
445 uint32_t old_conf;
446 uint32_t div;
447 struct clk *ipu_clk;
448
449 dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
450
451 if (v_sync_width == 0 || h_sync_width == 0)
452 return -EINVAL;
453
454 /* Init panel size and blanking periods */
455 reg = ((uint32_t) (h_sync_width - 1) << 26) |
456 ((uint32_t) (width + h_start_width + h_end_width - 1) << 16);
457 mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF);
458
459#ifdef DEBUG
460 printk(KERN_CONT " hor_conf %x,", reg);
461#endif
462
463 reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L |
464 ((uint32_t) (height + v_start_width + v_end_width - 1) << 16);
465 mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF);
466
467#ifdef DEBUG
468 printk(KERN_CONT " ver_conf %x\n", reg);
469#endif
470
471 mx3fb->h_start_width = h_start_width;
472 mx3fb->v_start_width = v_start_width;
473
474 switch (panel) {
475 case IPU_PANEL_SHARP_TFT:
476 mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1);
477 mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2);
478 mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF);
479 break;
480 case IPU_PANEL_TFT:
481 mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF);
482 break;
483 default:
484 return -EINVAL;
485 }
486
487 /* Init clocking */
488
489 /*
 490 * Calculate divider: fractional part is 4 bits so simply multiply by
 491 * 2^4 to get the fractional part, as long as we stay under ~250MHz and on
492 * i.MX31 it (HSP_CLK) is <= 178MHz. Currently 128.267MHz
493 */
494 dev_dbg(mx3fb->dev, "pixel clk = %d\n", pixel_clk);
495
496 ipu_clk = clk_get(mx3fb->dev, "ipu_clk");
497 div = clk_get_rate(ipu_clk) * 16 / pixel_clk;
498 clk_put(ipu_clk);
499
500 if (div < 0x40) { /* Divider less than 4 */
501 dev_dbg(mx3fb->dev,
502 "InitPanel() - Pixel clock divider less than 4\n");
503 div = 0x40;
504 }
505
506 spin_lock_irqsave(&mx3fb->lock, lock_flags);
507
508 /*
509 * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits
510 * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing
511 * debug. DISP3_IF_CLK_UP_WR is 0
512 */
513 mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF);
514
515 /* DI settings */
516 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
517 old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT |
518 sig.clksel_en << DI_D3_CLK_SEL_SHIFT |
519 sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT;
520 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
521
522 old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
523 old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT |
524 sig.clk_pol << DI_D3_CLK_POL_SHIFT |
525 sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
526 sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
527 sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
528 mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
529
530 switch (pixel_fmt) {
531 case IPU_PIX_FMT_RGB24:
532 mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
533 mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
534 mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
535 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
536 ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
537 break;
538 case IPU_PIX_FMT_RGB666:
539 mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
540 mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
541 mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
542 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
543 ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
544 break;
545 case IPU_PIX_FMT_BGR666:
546 mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
547 mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
548 mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
549 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
550 ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
551 break;
552 default:
553 mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
554 mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
555 mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
556 mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
557 ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
558 break;
559 }
560
561 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
562
563 dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n",
564 mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF));
565 dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n",
566 mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL));
567 dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n",
568 mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF));
569
570 return 0;
571}
572
573/**
574 * sdc_set_color_key() - set the transparent color key for SDC graphic plane.
575 * @mx3fb: mx3fb context.
576 * @channel: IPU DMAC channel ID.
577 * @enable:	boolean to enable or disable the color key.
578 * @color_key: 24-bit RGB color to use as transparent color key.
579 * @return: 0 on success or negative error code on failure.
580 */
581static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel,
582 bool enable, uint32_t color_key)
583{
584 uint32_t reg, sdc_conf;
585 unsigned long lock_flags;
586
587 spin_lock_irqsave(&mx3fb->lock, lock_flags);
588
589 sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
590 if (channel == IDMAC_SDC_0)
591 sdc_conf &= ~SDC_COM_GWSEL;
592 else
593 sdc_conf |= SDC_COM_GWSEL;
594
595 if (enable) {
596 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L;
597 mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL),
598 SDC_GW_CTRL);
599
600 sdc_conf |= SDC_COM_KEY_COLOR_G;
601 } else {
602 sdc_conf &= ~SDC_COM_KEY_COLOR_G;
603 }
604 mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF);
605
606 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
607
608 return 0;
609}
610
611/**
612 * sdc_set_global_alpha() - set global alpha blending modes.
613 * @mx3fb: mx3fb context.
614 * @enable: boolean to enable or disable global alpha blending. If disabled,
615 * per pixel blending is used.
616 * @alpha: global alpha value.
617 * @return: 0 on success or negative error code on failure.
618 */
619static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha)
620{
621 uint32_t reg;
622 unsigned long lock_flags;
623
624 spin_lock_irqsave(&mx3fb->lock, lock_flags);
625
626 if (enable) {
627 reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL;
628 mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL);
629
630 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
631 mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF);
632 } else {
633 reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
634 mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF);
635 }
636
637 spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
638
639 return 0;
640}
641
642static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
643{
644 /* This might be board-specific */
645 mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
646 return;
647}
648
649static uint32_t bpp_to_pixfmt(int bpp)
650{
651 uint32_t pixfmt = 0;
652 switch (bpp) {
653 case 24:
654 pixfmt = IPU_PIX_FMT_BGR24;
655 break;
656 case 32:
657 pixfmt = IPU_PIX_FMT_BGR32;
658 break;
659 case 16:
660 pixfmt = IPU_PIX_FMT_RGB565;
661 break;
662 }
663 return pixfmt;
664}
665
666static int mx3fb_blank(int blank, struct fb_info *fbi);
667static int mx3fb_map_video_memory(struct fb_info *fbi);
668static int mx3fb_unmap_video_memory(struct fb_info *fbi);
669
670/**
671 * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
672 * @fbi:	framebuffer information pointer
673 * @return: 0 on success or negative error code on failure.
674 */
675static int mx3fb_set_fix(struct fb_info *fbi)
676{
677 struct fb_fix_screeninfo *fix = &fbi->fix;
678 struct fb_var_screeninfo *var = &fbi->var;
679
680 strncpy(fix->id, "DISP3 BG", 8);
681
682 fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
683
684 fix->type = FB_TYPE_PACKED_PIXELS;
685 fix->accel = FB_ACCEL_NONE;
686 fix->visual = FB_VISUAL_TRUECOLOR;
687 fix->xpanstep = 1;
688 fix->ypanstep = 1;
689
690 return 0;
691}
692
693static void mx3fb_dma_done(void *arg)
694{
695 struct idmac_tx_desc *tx_desc = to_tx_desc(arg);
696 struct dma_chan *chan = tx_desc->txd.chan;
697 struct idmac_channel *ichannel = to_idmac_chan(chan);
698 struct mx3fb_data *mx3fb = ichannel->client;
699 struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
700
701 dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
702
703 /* We only need one interrupt, it will be re-enabled as needed */
704 disable_irq(ichannel->eof_irq);
705
706 complete(&mx3_fbi->flip_cmpl);
707}
708
709/**
710 * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
711 * @fbi: framebuffer information pointer.
712 * @return: 0 on success or negative error code on failure.
713 */
714static int mx3fb_set_par(struct fb_info *fbi)
715{
716 u32 mem_len;
717 struct ipu_di_signal_cfg sig_cfg;
718 enum ipu_panel mode = IPU_PANEL_TFT;
719 struct mx3fb_info *mx3_fbi = fbi->par;
720 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
721 struct idmac_channel *ichan = mx3_fbi->idmac_channel;
722 struct idmac_video_param *video = &ichan->params.video;
723 struct scatterlist *sg = mx3_fbi->sg;
724 size_t screen_size;
725
726 dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
727
728 mutex_lock(&mx3_fbi->mutex);
729
730 /* Total cleanup */
731 if (mx3_fbi->txd)
732 sdc_disable_channel(mx3_fbi);
733
734 mx3fb_set_fix(fbi);
735
736 mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
737 if (mem_len > fbi->fix.smem_len) {
738 if (fbi->fix.smem_start)
739 mx3fb_unmap_video_memory(fbi);
740
741 fbi->fix.smem_len = mem_len;
742 if (mx3fb_map_video_memory(fbi) < 0) {
743 mutex_unlock(&mx3_fbi->mutex);
744 return -ENOMEM;
745 }
746 }
747
748 screen_size = fbi->fix.line_length * fbi->var.yres;
749
750 sg_init_table(&sg[0], 1);
751 sg_init_table(&sg[1], 1);
752
753 sg_dma_address(&sg[0]) = fbi->fix.smem_start;
754 sg_set_page(&sg[0], virt_to_page(fbi->screen_base),
755 fbi->fix.smem_len,
756 offset_in_page(fbi->screen_base));
757
758 if (mx3_fbi->ipu_ch == IDMAC_SDC_0) {
759 memset(&sig_cfg, 0, sizeof(sig_cfg));
760 if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
761 sig_cfg.Hsync_pol = true;
762 if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
763 sig_cfg.Vsync_pol = true;
764 if (fbi->var.sync & FB_SYNC_CLK_INVERT)
765 sig_cfg.clk_pol = true;
766 if (fbi->var.sync & FB_SYNC_DATA_INVERT)
767 sig_cfg.data_pol = true;
768 if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH)
769 sig_cfg.enable_pol = true;
770 if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
771 sig_cfg.clkidle_en = true;
772 if (fbi->var.sync & FB_SYNC_CLK_SEL_EN)
773 sig_cfg.clksel_en = true;
774 if (fbi->var.sync & FB_SYNC_SHARP_MODE)
775 mode = IPU_PANEL_SHARP_TFT;
776
777		dev_dbg(fbi->device, "pixclock = %u Hz\n",
778 (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
779
780 if (sdc_init_panel(mx3fb, mode,
781 (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
782 fbi->var.xres, fbi->var.yres,
783 (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
784 IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
785 fbi->var.left_margin,
786 fbi->var.hsync_len,
787 fbi->var.right_margin +
788 fbi->var.hsync_len,
789 fbi->var.upper_margin,
790 fbi->var.vsync_len,
791 fbi->var.lower_margin +
792 fbi->var.vsync_len, sig_cfg) != 0) {
793 mutex_unlock(&mx3_fbi->mutex);
794 dev_err(fbi->device,
795 "mx3fb: Error initializing panel.\n");
796 return -EINVAL;
797 }
798 }
799
800 sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0);
801
802 mx3_fbi->cur_ipu_buf = 0;
803
804 video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel);
805 video->out_width = fbi->var.xres;
806 video->out_height = fbi->var.yres;
807 video->out_stride = fbi->var.xres_virtual;
808
809 if (mx3_fbi->blank == FB_BLANK_UNBLANK)
810 sdc_enable_channel(mx3_fbi);
811
812 mutex_unlock(&mx3_fbi->mutex);
813
814 return 0;
815}
816
817/**
818 * mx3fb_check_var() - check and adjust framebuffer variable parameters.
819 * @var: framebuffer variable parameters
820 * @fbi: framebuffer information pointer
821 */
822static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
823{
824 struct mx3fb_info *mx3_fbi = fbi->par;
825 u32 vtotal;
826 u32 htotal;
827
828 dev_dbg(fbi->device, "%s\n", __func__);
829
830 if (var->xres_virtual < var->xres)
831 var->xres_virtual = var->xres;
832 if (var->yres_virtual < var->yres)
833 var->yres_virtual = var->yres;
834
835 if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
836 (var->bits_per_pixel != 16))
837 var->bits_per_pixel = default_bpp;
838
839 switch (var->bits_per_pixel) {
840 case 16:
841 var->red.length = 5;
842 var->red.offset = 11;
843 var->red.msb_right = 0;
844
845 var->green.length = 6;
846 var->green.offset = 5;
847 var->green.msb_right = 0;
848
849 var->blue.length = 5;
850 var->blue.offset = 0;
851 var->blue.msb_right = 0;
852
853 var->transp.length = 0;
854 var->transp.offset = 0;
855 var->transp.msb_right = 0;
856 break;
857 case 24:
858 var->red.length = 8;
859 var->red.offset = 16;
860 var->red.msb_right = 0;
861
862 var->green.length = 8;
863 var->green.offset = 8;
864 var->green.msb_right = 0;
865
866 var->blue.length = 8;
867 var->blue.offset = 0;
868 var->blue.msb_right = 0;
869
870 var->transp.length = 0;
871 var->transp.offset = 0;
872 var->transp.msb_right = 0;
873 break;
874 case 32:
875 var->red.length = 8;
876 var->red.offset = 16;
877 var->red.msb_right = 0;
878
879 var->green.length = 8;
880 var->green.offset = 8;
881 var->green.msb_right = 0;
882
883 var->blue.length = 8;
884 var->blue.offset = 0;
885 var->blue.msb_right = 0;
886
887 var->transp.length = 8;
888 var->transp.offset = 24;
889 var->transp.msb_right = 0;
890 break;
891 }
892
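	/*
	 * No pixel clock supplied: derive one for a nominal 60 Hz refresh.
	 * Pixel rate = htotal * vtotal * 60 Hz = htotal * vtotal * 6 / 100 kHz,
	 * which KHZ2PICOS() then converts to a period in picoseconds.
	 */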
893 if (var->pixclock < 1000) {
894 htotal = var->xres + var->right_margin + var->hsync_len +
895 var->left_margin;
896 vtotal = var->yres + var->lower_margin + var->vsync_len +
897 var->upper_margin;
898 var->pixclock = (vtotal * htotal * 6UL) / 100UL;
899 var->pixclock = KHZ2PICOS(var->pixclock);
900 dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n",
901 var->pixclock);
902 }
903
904 var->height = -1;
905 var->width = -1;
906 var->grayscale = 0;
907
908 /* Preserve sync flags */
909 var->sync |= mx3_fbi->sync;
910 mx3_fbi->sync |= var->sync;
911
912 return 0;
913}
914
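/*
 * Scale a 16-bit colour component down to the width of its fb_bitfield and
 * shift it into place; e.g. red = 0xffff with a 5-bit field at offset 11
 * (RGB565) becomes 0xf800.
 */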
915static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf)
916{
917 chan &= 0xffff;
918 chan >>= 16 - bf->length;
919 return chan << bf->offset;
920}
921
922static int mx3fb_setcolreg(unsigned int regno, unsigned int red,
923 unsigned int green, unsigned int blue,
924 unsigned int trans, struct fb_info *fbi)
925{
926 struct mx3fb_info *mx3_fbi = fbi->par;
927 u32 val;
928 int ret = 1;
929
930 dev_dbg(fbi->device, "%s\n", __func__);
931
932 mutex_lock(&mx3_fbi->mutex);
933 /*
934 * If greyscale is true, then we convert the RGB value
935 * to greyscale no matter what visual we are using.
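	 * The weights 19595, 38470 and 7471 are the ITU-R BT.601 luma
	 * coefficients 0.299, 0.587 and 0.114 scaled by 2^16.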
936 */
937 if (fbi->var.grayscale)
938 red = green = blue = (19595 * red + 38470 * green +
939 7471 * blue) >> 16;
940 switch (fbi->fix.visual) {
941 case FB_VISUAL_TRUECOLOR:
942 /*
943 * 16-bit True Colour. We encode the RGB value
944 * according to the RGB bitfield information.
945 */
946 if (regno < 16) {
947 u32 *pal = fbi->pseudo_palette;
948
949 val = chan_to_field(red, &fbi->var.red);
950 val |= chan_to_field(green, &fbi->var.green);
951 val |= chan_to_field(blue, &fbi->var.blue);
952
953 pal[regno] = val;
954
955 ret = 0;
956 }
957 break;
958
959 case FB_VISUAL_STATIC_PSEUDOCOLOR:
960 case FB_VISUAL_PSEUDOCOLOR:
961 break;
962 }
963 mutex_unlock(&mx3_fbi->mutex);
964
965 return ret;
966}
967
968/**
969 * mx3fb_blank() - blank the display.
970 */
971static int mx3fb_blank(int blank, struct fb_info *fbi)
972{
973 struct mx3fb_info *mx3_fbi = fbi->par;
974 struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
975
976 dev_dbg(fbi->device, "%s\n", __func__);
977
978 dev_dbg(fbi->device, "blank = %d\n", blank);
979
980 if (mx3_fbi->blank == blank)
981 return 0;
982
983 mutex_lock(&mx3_fbi->mutex);
984 mx3_fbi->blank = blank;
985
986 switch (blank) {
987 case FB_BLANK_POWERDOWN:
988 case FB_BLANK_VSYNC_SUSPEND:
989 case FB_BLANK_HSYNC_SUSPEND:
990 case FB_BLANK_NORMAL:
991 sdc_disable_channel(mx3_fbi);
992 sdc_set_brightness(mx3fb, 0);
993 break;
994 case FB_BLANK_UNBLANK:
995 sdc_enable_channel(mx3_fbi);
996 sdc_set_brightness(mx3fb, mx3fb->backlight_level);
997 break;
998 }
999 mutex_unlock(&mx3_fbi->mutex);
1000
1001 return 0;
1002}
1003
1004/**
1005 * mx3fb_pan_display() - pan or wrap the display
1006 * @var: variable screen buffer information.
1007 * @fbi:	framebuffer information pointer.
1008 *
1009 * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
1010 */
1011static int mx3fb_pan_display(struct fb_var_screeninfo *var,
1012 struct fb_info *fbi)
1013{
1014 struct mx3fb_info *mx3_fbi = fbi->par;
1015 u32 y_bottom;
1016 unsigned long base;
1017 off_t offset;
1018 dma_cookie_t cookie;
1019 struct scatterlist *sg = mx3_fbi->sg;
1020 struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan;
1021 struct dma_async_tx_descriptor *txd;
1022 int ret;
1023
1024 dev_dbg(fbi->device, "%s [%c]\n", __func__,
1025 list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+');
1026
1027 if (var->xoffset > 0) {
1028 dev_dbg(fbi->device, "x panning not supported\n");
1029 return -EINVAL;
1030 }
1031
1032 if (fbi->var.xoffset == var->xoffset &&
1033 fbi->var.yoffset == var->yoffset)
1034 return 0; /* No change, do nothing */
1035
1036 y_bottom = var->yoffset;
1037
1038 if (!(var->vmode & FB_VMODE_YWRAP))
1039 y_bottom += var->yres;
1040
1041 if (y_bottom > fbi->var.yres_virtual)
1042 return -EINVAL;
1043
1044 mutex_lock(&mx3_fbi->mutex);
1045
1046 offset = (var->yoffset * var->xres_virtual + var->xoffset) *
1047 (var->bits_per_pixel / 8);
1048 base = fbi->fix.smem_start + offset;
1049
1050 dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n",
1051 mx3_fbi->cur_ipu_buf, base);
1052
1053 /*
1054 * We enable the End of Frame interrupt, which will free a tx-descriptor,
1055 * which we will need for the next device_prep_slave_sg(). The
1056 * IRQ-handler will disable the IRQ again.
1057 */
1058 init_completion(&mx3_fbi->flip_cmpl);
1059 enable_irq(mx3_fbi->idmac_channel->eof_irq);
1060
1061 ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10);
1062 if (ret <= 0) {
1063 mutex_unlock(&mx3_fbi->mutex);
1064 dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ?
1065 "user interrupt" : "timeout");
1066 return ret ? : -ETIMEDOUT;
1067 }
1068
1069 mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf;
1070
1071 sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base;
1072 sg_set_page(&sg[mx3_fbi->cur_ipu_buf],
1073 virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len,
1074 offset_in_page(fbi->screen_base + offset));
1075
1076 txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
1077 mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
1078 if (!txd) {
1079 dev_err(fbi->device,
1080 "Error preparing a DMA transaction descriptor.\n");
1081 mutex_unlock(&mx3_fbi->mutex);
1082 return -EIO;
1083 }
1084
1085 txd->callback_param = txd;
1086 txd->callback = mx3fb_dma_done;
1087
1088 /*
1089 * Emulate original mx3fb behaviour: each new call to idmac_tx_submit()
1090 * should switch to another buffer
1091 */
1092 cookie = txd->tx_submit(txd);
1093 dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie);
1094 if (cookie < 0) {
1095 dev_err(fbi->device,
1096 "Error updating SDC buf %d to address=0x%08lX\n",
1097 mx3_fbi->cur_ipu_buf, base);
1098 mutex_unlock(&mx3_fbi->mutex);
1099 return -EIO;
1100 }
1101
1102 if (mx3_fbi->txd)
1103 async_tx_ack(mx3_fbi->txd);
1104 mx3_fbi->txd = txd;
1105
1106 fbi->var.xoffset = var->xoffset;
1107 fbi->var.yoffset = var->yoffset;
1108
1109 if (var->vmode & FB_VMODE_YWRAP)
1110 fbi->var.vmode |= FB_VMODE_YWRAP;
1111 else
1112 fbi->var.vmode &= ~FB_VMODE_YWRAP;
1113
1114 mutex_unlock(&mx3_fbi->mutex);
1115
1116 dev_dbg(fbi->device, "Update complete\n");
1117
1118 return 0;
1119}
1120
1121/*
1122 * This structure contains the pointers to the control functions that are
1123 * invoked by the core framebuffer driver to perform operations like
1124 * blitting, rectangle filling, copy regions and cursor definition.
1125 */
1126static struct fb_ops mx3fb_ops = {
1127 .owner = THIS_MODULE,
1128 .fb_set_par = mx3fb_set_par,
1129 .fb_check_var = mx3fb_check_var,
1130 .fb_setcolreg = mx3fb_setcolreg,
1131 .fb_pan_display = mx3fb_pan_display,
1132 .fb_fillrect = cfb_fillrect,
1133 .fb_copyarea = cfb_copyarea,
1134 .fb_imageblit = cfb_imageblit,
1135 .fb_blank = mx3fb_blank,
1136};
1137
1138#ifdef CONFIG_PM
1139/*
1140 * Power management hooks. Note that we won't be called from IRQ context,
1141 * unlike the blank functions above, so we may sleep.
1142 */
1143
1144/*
1145 * Suspends the framebuffer and blanks the screen. Power management support
1146 */
1147static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
1148{
1149 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1150 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1151
1152 acquire_console_sem();
1153 fb_set_suspend(drv_data->fbi, 1);
1154 release_console_sem();
1155
1156 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1157 sdc_disable_channel(mx3_fbi);
1158		sdc_set_brightness(drv_data, 0);
1159
1160 }
1161 return 0;
1162}
1163
1164/*
1165 * Resumes the framebuffer and unblanks the screen. Power management support
1166 */
1167static int mx3fb_resume(struct platform_device *pdev)
1168{
1169 struct mx3fb_data *drv_data = platform_get_drvdata(pdev);
1170 struct mx3fb_info *mx3_fbi = drv_data->fbi->par;
1171
1172 if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
1173 sdc_enable_channel(mx3_fbi);
1174		sdc_set_brightness(drv_data, drv_data->backlight_level);
1175 }
1176
1177 acquire_console_sem();
1178 fb_set_suspend(drv_data->fbi, 0);
1179 release_console_sem();
1180
1181 return 0;
1182}
1183#else
1184#define mx3fb_suspend NULL
1185#define mx3fb_resume NULL
1186#endif
1187
1188/*
1189 * Main framebuffer functions
1190 */
1191
1192/**
1193 * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
1194 * @fbi: framebuffer information pointer
1195 * @return: Error code indicating success or failure
1196 *
1197 * This buffer is remapped into a non-cached, non-buffered, memory region to
1198 * allow palette and pixel writes to occur without flushing the cache. Once this
1199 * area is remapped, all virtual memory access to the video memory should occur
1200 * at the new region.
1201 */
1202static int mx3fb_map_video_memory(struct fb_info *fbi)
1203{
1204 int retval = 0;
1205 dma_addr_t addr;
1206
1207 fbi->screen_base = dma_alloc_writecombine(fbi->device,
1208 fbi->fix.smem_len,
1209 &addr, GFP_DMA);
1210
1211 if (!fbi->screen_base) {
1212 dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
1213 fbi->fix.smem_len);
1214 retval = -EBUSY;
1215 goto err0;
1216 }
1217
1218 fbi->fix.smem_start = addr;
1219
1220 dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
1221 (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
1222
1223 fbi->screen_size = fbi->fix.smem_len;
1224
1225 /* Clear the screen */
1226 memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
1227
1228 return 0;
1229
1230err0:
1231 fbi->fix.smem_len = 0;
1232 fbi->fix.smem_start = 0;
1233 fbi->screen_base = NULL;
1234 return retval;
1235}
1236
1237/**
1238 * mx3fb_unmap_video_memory() - de-allocate frame buffer memory.
1239 * @fbi: framebuffer information pointer
1240 * @return: error code indicating success or failure
1241 */
1242static int mx3fb_unmap_video_memory(struct fb_info *fbi)
1243{
1244 dma_free_writecombine(fbi->device, fbi->fix.smem_len,
1245 fbi->screen_base, fbi->fix.smem_start);
1246
1247 fbi->screen_base = 0;
1248 fbi->fix.smem_start = 0;
1249 fbi->fix.smem_len = 0;
1250 return 0;
1251}
1252
1253/**
1254 * mx3fb_init_fbinfo() - initialize framebuffer information object.
1255 * @return: initialized framebuffer structure.
1256 */
1257static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops)
1258{
1259 struct fb_info *fbi;
1260 struct mx3fb_info *mx3fbi;
1261 int ret;
1262
1263 /* Allocate sufficient memory for the fb structure */
1264 fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev);
1265 if (!fbi)
1266 return NULL;
1267
1268 mx3fbi = fbi->par;
1269 mx3fbi->cookie = -EINVAL;
1270 mx3fbi->cur_ipu_buf = 0;
1271
1272 fbi->var.activate = FB_ACTIVATE_NOW;
1273
1274 fbi->fbops = ops;
1275 fbi->flags = FBINFO_FLAG_DEFAULT;
1276 fbi->pseudo_palette = mx3fbi->pseudo_palette;
1277
1278 mutex_init(&mx3fbi->mutex);
1279
1280 /* Allocate colormap */
1281 ret = fb_alloc_cmap(&fbi->cmap, 16, 0);
1282 if (ret < 0) {
1283 framebuffer_release(fbi);
1284 return NULL;
1285 }
1286
1287 return fbi;
1288}
1289
1290static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
1291{
1292 struct device *dev = mx3fb->dev;
1293 struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data;
1294 const char *name = mx3fb_pdata->name;
1295 unsigned int irq;
1296 struct fb_info *fbi;
1297 struct mx3fb_info *mx3fbi;
1298 const struct fb_videomode *mode;
1299 int ret, num_modes;
1300
1301 ichan->client = mx3fb;
1302 irq = ichan->eof_irq;
1303
1304 if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
1305 return -EINVAL;
1306
1307 fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops);
1308 if (!fbi)
1309 return -ENOMEM;
1310
1311 if (!fb_mode)
1312 fb_mode = name;
1313
1314 if (!fb_mode) {
1315 ret = -EINVAL;
1316 goto emode;
1317 }
1318
1319 if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) {
1320 mode = mx3fb_pdata->mode;
1321 num_modes = mx3fb_pdata->num_modes;
1322 } else {
1323 mode = mx3fb_modedb;
1324 num_modes = ARRAY_SIZE(mx3fb_modedb);
1325 }
1326
1327 if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode,
1328 num_modes, NULL, default_bpp)) {
1329 ret = -EBUSY;
1330 goto emode;
1331 }
1332
1333 fb_videomode_to_modelist(mode, num_modes, &fbi->modelist);
1334
1335 /* Default Y virtual size is 2x panel size */
1336 fbi->var.yres_virtual = fbi->var.yres * 2;
1337
1338 mx3fb->fbi = fbi;
1339
1340 /* set Display Interface clock period */
1341 mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER);
1342 /* Might need to trigger HSP clock change - see 44.3.3.8.5 */
1343
1344 sdc_set_brightness(mx3fb, 255);
1345 sdc_set_global_alpha(mx3fb, true, 0xFF);
1346 sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0);
1347
1348 mx3fbi = fbi->par;
1349 mx3fbi->idmac_channel = ichan;
1350 mx3fbi->ipu_ch = ichan->dma_chan.chan_id;
1351 mx3fbi->mx3fb = mx3fb;
1352 mx3fbi->blank = FB_BLANK_NORMAL;
1353
1354 init_completion(&mx3fbi->flip_cmpl);
1355 disable_irq(ichan->eof_irq);
1356 dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
1357 ret = mx3fb_set_par(fbi);
1358 if (ret < 0)
1359 goto esetpar;
1360
1361 mx3fb_blank(FB_BLANK_UNBLANK, fbi);
1362
1363 dev_info(dev, "mx3fb: fb registered, using mode %s\n", fb_mode);
1364
1365 ret = register_framebuffer(fbi);
1366 if (ret < 0)
1367 goto erfb;
1368
1369 return 0;
1370
1371erfb:
1372esetpar:
1373emode:
1374 fb_dealloc_cmap(&fbi->cmap);
1375 framebuffer_release(fbi);
1376
1377 return ret;
1378}
1379
1380static bool chan_filter(struct dma_chan *chan, void *arg)
1381{
1382 struct dma_chan_request *rq = arg;
1383 struct device *dev;
1384 struct mx3fb_platform_data *mx3fb_pdata;
1385
1386 if (!rq)
1387 return false;
1388
1389 dev = rq->mx3fb->dev;
1390 mx3fb_pdata = dev->platform_data;
1391
1392 return rq->id == chan->chan_id &&
1393 mx3fb_pdata->dma_dev == chan->device->dev;
1394}
1395
1396static void release_fbi(struct fb_info *fbi)
1397{
1398 mx3fb_unmap_video_memory(fbi);
1399
1400 fb_dealloc_cmap(&fbi->cmap);
1401
1402 unregister_framebuffer(fbi);
1403 framebuffer_release(fbi);
1404}
1405
1406static int mx3fb_probe(struct platform_device *pdev)
1407{
1408 struct device *dev = &pdev->dev;
1409 int ret;
1410 struct resource *sdc_reg;
1411 struct mx3fb_data *mx3fb;
1412 dma_cap_mask_t mask;
1413 struct dma_chan *chan;
1414 struct dma_chan_request rq;
1415
1416 /*
1417 * Display Interface (DI) and Synchronous Display Controller (SDC)
1418 * registers
1419 */
1420 sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1421 if (!sdc_reg)
1422 return -EINVAL;
1423
1424 mx3fb = kzalloc(sizeof(*mx3fb), GFP_KERNEL);
1425 if (!mx3fb)
1426 return -ENOMEM;
1427
1428 spin_lock_init(&mx3fb->lock);
1429
1430 mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg));
1431 if (!mx3fb->reg_base) {
1432 ret = -ENOMEM;
1433 goto eremap;
1434 }
1435
1436 pr_debug("Remapped %x to %x at %p\n", sdc_reg->start, sdc_reg->end,
1437 mx3fb->reg_base);
1438
1439 /* IDMAC interface */
1440 dmaengine_get();
1441
1442 mx3fb->dev = dev;
1443 platform_set_drvdata(pdev, mx3fb);
1444
1445 rq.mx3fb = mx3fb;
1446
1447 dma_cap_zero(mask);
1448 dma_cap_set(DMA_SLAVE, mask);
1449 dma_cap_set(DMA_PRIVATE, mask);
1450 rq.id = IDMAC_SDC_0;
1451 chan = dma_request_channel(mask, chan_filter, &rq);
1452 if (!chan) {
1453 ret = -EBUSY;
1454 goto ersdc0;
1455 }
1456
1457 ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
1458 if (ret < 0)
1459 goto eisdc0;
1460
1461 mx3fb->backlight_level = 255;
1462
1463 return 0;
1464
1465eisdc0:
1466 dma_release_channel(chan);
1467ersdc0:
1468 dmaengine_put();
1469 iounmap(mx3fb->reg_base);
1470eremap:
1471 kfree(mx3fb);
1472 dev_err(dev, "mx3fb: failed to register fb\n");
1473 return ret;
1474}
1475
1476static int mx3fb_remove(struct platform_device *dev)
1477{
1478 struct mx3fb_data *mx3fb = platform_get_drvdata(dev);
1479 struct fb_info *fbi = mx3fb->fbi;
1480 struct mx3fb_info *mx3_fbi = fbi->par;
1481 struct dma_chan *chan;
1482
1483 chan = &mx3_fbi->idmac_channel->dma_chan;
1484 release_fbi(fbi);
1485
1486 dma_release_channel(chan);
1487 dmaengine_put();
1488
1489 iounmap(mx3fb->reg_base);
1490 kfree(mx3fb);
1491 return 0;
1492}
1493
1494static struct platform_driver mx3fb_driver = {
1495 .driver = {
1496 .name = MX3FB_NAME,
1497 },
1498 .probe = mx3fb_probe,
1499 .remove = mx3fb_remove,
1500 .suspend = mx3fb_suspend,
1501 .resume = mx3fb_resume,
1502};
1503
1504/*
1505 * Parse user specified options (`video=mx3fb:')
1506 * example:
1507 * video=mx3fb:bpp=16
1508 */
1509static int mx3fb_setup(void)
1510{
1511#ifndef MODULE
1512 char *opt, *options = NULL;
1513
1514 if (fb_get_options("mx3fb", &options))
1515 return -ENODEV;
1516
1517 if (!options || !*options)
1518 return 0;
1519
1520 while ((opt = strsep(&options, ",")) != NULL) {
1521 if (!*opt)
1522 continue;
1523 if (!strncmp(opt, "bpp=", 4))
1524 default_bpp = simple_strtoul(opt + 4, NULL, 0);
1525 else
1526 fb_mode = opt;
1527 }
1528#endif
1529
1530 return 0;
1531}
1532
1533static int __init mx3fb_init(void)
1534{
1535 int ret = mx3fb_setup();
1536
1537 if (ret < 0)
1538 return ret;
1539
1540 ret = platform_driver_register(&mx3fb_driver);
1541 return ret;
1542}
1543
1544static void __exit mx3fb_exit(void)
1545{
1546 platform_driver_unregister(&mx3fb_driver);
1547}
1548
1549module_init(mx3fb_init);
1550module_exit(mx3fb_exit);
1551
1552MODULE_AUTHOR("Freescale Semiconductor, Inc.");
1553MODULE_DESCRIPTION("MX3 framebuffer driver");
1554MODULE_ALIAS("platform:" MX3FB_NAME);
1555MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/nvidia/nv_i2c.c b/drivers/video/nvidia/nv_i2c.c
index 6fd7cb8f9b8e..6aaddb4f6788 100644
--- a/drivers/video/nvidia/nv_i2c.c
+++ b/drivers/video/nvidia/nv_i2c.c
@@ -87,7 +87,6 @@ static int nvidia_setup_i2c_bus(struct nvidia_i2c_chan *chan, const char *name,
 
 	strcpy(chan->adapter.name, name);
 	chan->adapter.owner = THIS_MODULE;
-	chan->adapter.id = I2C_HW_B_NVIDIA;
 	chan->adapter.class = i2c_class;
 	chan->adapter.algo_data = &chan->algo;
 	chan->adapter.dev.parent = &chan->par->pci_dev->dev;
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 38ac805db97d..87f826e4c958 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -1006,7 +1006,7 @@ static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev)
 			__func__, status);
 		return -ENXIO;
 	}
-	dev_dbg(dev, "video:%p ioif:%lx lpar:%lx size:%lx\n",
+	dev_dbg(dev, "video:%p ioif:%lx lpar:%llx size:%lx\n",
 		ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
 		ps3fb_videomemory.size);
 
@@ -1133,7 +1133,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
 			__func__, status);
 		goto err;
 	}
-	dev_dbg(&dev->core, "ddr:lpar:0x%lx\n", ddr_lpar);
+	dev_dbg(&dev->core, "ddr:lpar:0x%llx\n", ddr_lpar);
 
 	status = lv1_gpu_context_allocate(ps3fb.memory_handle, 0,
 					  &ps3fb.context_handle,
diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c
index 783d4adffb93..574b29e9f8f2 100644
--- a/drivers/video/savage/savagefb-i2c.c
+++ b/drivers/video/savage/savagefb-i2c.c
@@ -137,7 +137,6 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
 	if (chan->par) {
 		strcpy(chan->adapter.name, name);
 		chan->adapter.owner = THIS_MODULE;
-		chan->adapter.id = I2C_HW_B_SAVAGE;
 		chan->adapter.algo_data = &chan->algo;
 		chan->adapter.dev.parent = &chan->par->pcidev->dev;
 		chan->algo.udelay = 10;
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 8dc7109d61b7..2ba8f95516a0 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -298,6 +298,14 @@ static int decrease_reservation(unsigned long nr_pages)
 		frame_list[i] = pfn_to_mfn(pfn);
 
 		scrub_page(page);
+
+		if (!PageHighMem(page)) {
+			ret = HYPERVISOR_update_va_mapping(
+				(unsigned long)__va(pfn << PAGE_SHIFT),
+				__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+
 	}
 
 	/* Ensure that ballooned highmem pages don't have kmaps. */
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 875a4c59c594..a9592d981b10 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -291,7 +291,7 @@ static void watch_fired(struct xenbus_watch *watch,
 static int xenbus_write_transaction(unsigned msg_type,
 				    struct xenbus_file_priv *u)
 {
-	int rc, ret;
+	int rc;
 	void *reply;
 	struct xenbus_transaction_holder *trans = NULL;
 	LIST_HEAD(staging_q);
@@ -326,15 +326,14 @@ static int xenbus_write_transaction(unsigned msg_type,
 	}
 
 	mutex_lock(&u->reply_mutex);
-	ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
-	if (!ret)
-		ret = queue_reply(&staging_q, reply, u->u.msg.len);
-	if (!ret) {
+	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
+	if (!rc)
+		rc = queue_reply(&staging_q, reply, u->u.msg.len);
+	if (!rc) {
 		list_splice_tail(&staging_q, &u->read_buffers);
 		wake_up(&u->read_waitq);
 	} else {
 		queue_cleanup(&staging_q);
-		rc = ret;
 	}
 	mutex_unlock(&u->reply_mutex);
 