Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c | 97
-rw-r--r--  drivers/ata/Kconfig | 1
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 11
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/libahci.c | 16
-rw-r--r--  drivers/ata/libata-core.c | 11
-rw-r--r--  drivers/ata/libata-sff.c | 4
-rw-r--r--  drivers/ata/pata_cmd64x.c | 6
-rw-r--r--  drivers/ata/pata_legacy.c | 15
-rw-r--r--  drivers/ata/pata_winbond.c | 282
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 6
-rw-r--r--  drivers/ata/sata_mv.c | 44
-rw-r--r--  drivers/base/firmware_class.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/block/xsysace.c | 1
-rw-r--r--  drivers/char/agp/intel-agp.c | 40
-rw-r--r--  drivers/char/agp/intel-agp.h | 21
-rw-r--r--  drivers/char/agp/intel-gtt.c | 66
-rw-r--r--  drivers/char/hangcheck-timer.c | 2
-rw-r--r--  drivers/char/hvc_console.c | 2
-rw-r--r--  drivers/char/hvsi.c | 2
-rw-r--r--  drivers/char/hw_random/n2-drv.c | 2
-rw-r--r--  drivers/char/ip2/ip2main.c | 4
-rw-r--r--  drivers/char/pty.c | 4
-rw-r--r--  drivers/char/rocket.c | 1
-rw-r--r--  drivers/char/synclink_gt.c | 4
-rw-r--r--  drivers/char/sysrq.c | 53
-rw-r--r--  drivers/char/tty_io.c | 94
-rw-r--r--  drivers/char/vt.c | 26
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 1
-rw-r--r--  drivers/edac/amd64_edac.c | 10
-rw-r--r--  drivers/edac/edac_mce_amd.c | 17
-rw-r--r--  drivers/firewire/core-transaction.c | 13
-rw-r--r--  drivers/firewire/net.c | 28
-rw-r--r--  drivers/firewire/ohci.c | 10
-rw-r--r--  drivers/firewire/sbp2.c | 23
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 24
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 25
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 24
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 5
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 30
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 28
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 123
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 72
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 365
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 271
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 135
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 97
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 725
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 654
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 136
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 106
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2148
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 162
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 100
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 13
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 99
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 105
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 223
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 56
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 61
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 8
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 12
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 28
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 34
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-egalax.c | 9
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-picolcd.c | 4
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 11
-rw-r--r--  drivers/hwmon/Kconfig | 6
-rw-r--r--  drivers/hwmon/ads7871.c | 38
-rw-r--r--  drivers/hwmon/coretemp.c | 1
-rw-r--r--  drivers/hwmon/f71882fg.c | 83
-rw-r--r--  drivers/hwmon/k8temp.c | 35
-rw-r--r--  drivers/ieee1394/ohci1394.c | 2
-rw-r--r--  drivers/input/keyboard/hil_kbd.c | 12
-rw-r--r--  drivers/input/keyboard/pxa27x_keypad.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mousedev.c | 8
-rw-r--r--  drivers/macintosh/via-pmu.c | 42
-rw-r--r--  drivers/md/.gitignore | 4
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/md.c | 65
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/raid1.c | 21
-rw-r--r--  drivers/md/raid10.c | 17
-rw-r--r--  drivers/md/raid5.c | 13
-rw-r--r--  drivers/media/dvb/mantis/Kconfig | 2
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 6
-rw-r--r--  drivers/mmc/host/sdhci.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.h | 2
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 1
-rw-r--r--  drivers/mtd/nand/nand_base.c | 11
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 2
-rw-r--r--  drivers/mtd/ubi/Kconfig.debug | 2
-rw-r--r--  drivers/mtd/ubi/cdev.c | 12
-rw-r--r--  drivers/mtd/ubi/scan.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 3
-rw-r--r--  drivers/net/3c59x.c | 30
-rw-r--r--  drivers/net/b44.c | 9
-rw-r--r--  drivers/net/benet/be.h | 1
-rw-r--r--  drivers/net/benet/be_cmds.c | 8
-rw-r--r--  drivers/net/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/benet/be_ethtool.c | 1
-rw-r--r--  drivers/net/benet/be_hw.h | 7
-rw-r--r--  drivers/net/benet/be_main.c | 47
-rw-r--r--  drivers/net/bonding/bond_main.c | 56
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/ibm_newemac/debug.c | 2
-rw-r--r--  drivers/net/ks8851.c | 39
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 9
-rw-r--r--  drivers/net/niu.c | 16
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/pxa168_eth.c | 60
-rw-r--r--  drivers/net/qlcnic/qlcnic_main.c | 9
-rw-r--r--  drivers/net/qlge/qlge_main.c | 4
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 9
-rw-r--r--  drivers/net/usb/ipheth.c | 12
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/regd.h | 1
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 2
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 6
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 16
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 4
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 4
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/Makefile | 3
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 9
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 36
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 14
-rw-r--r--  drivers/pci/pcie/pme.c (renamed from drivers/pci/pcie/pme/pcie_pme.c) | 66
-rw-r--r--  drivers/pci/pcie/pme/Makefile | 8
-rw-r--r--  drivers/pci/pcie/pme/pcie_pme.h | 28
-rw-r--r--  drivers/pci/pcie/pme/pcie_pme_acpi.c | 54
-rw-r--r--  drivers/pci/pcie/portdrv.h | 22
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 77
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 53
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 38
-rw-r--r--  drivers/pci/slot.c | 2
-rw-r--r--  drivers/platform/x86/Kconfig | 4
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 6
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 9
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 7
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 64
-rw-r--r--  drivers/platform/x86/intel_ips.c | 15
-rw-r--r--  drivers/platform/x86/intel_rar_register.c | 2
-rw-r--r--  drivers/platform/x86/intel_scu_ipc.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 171
-rw-r--r--  drivers/s390/char/ctrlchar.c | 4
-rw-r--r--  drivers/s390/char/keyboard.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r--  drivers/serial/68328serial.c | 29
-rw-r--r--  drivers/serial/8250_early.c | 4
-rw-r--r--  drivers/serial/bfin_sport_uart.c | 2
-rw-r--r--  drivers/serial/of_serial.c | 3
-rw-r--r--  drivers/serial/sn_console.c | 2
-rw-r--r--  drivers/serial/suncore.c | 15
-rw-r--r--  drivers/spi/coldfire_qspi.c | 1
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/batman-adv/bat_sysfs.c | 4
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c | 29
-rw-r--r--  drivers/staging/batman-adv/icmp_socket.c | 12
-rw-r--r--  drivers/staging/batman-adv/main.c | 7
-rw-r--r--  drivers/staging/batman-adv/originator.c | 14
-rw-r--r--  drivers/staging/batman-adv/routing.c | 16
-rw-r--r--  drivers/staging/batman-adv/types.h | 1
-rw-r--r--  drivers/staging/comedi/drivers/das08_cs.c | 1
-rw-r--r--  drivers/staging/hv/netvsc_drv.c | 3
-rw-r--r--  drivers/staging/hv/ring_buffer.c | 3
-rw-r--r--  drivers/staging/hv/storvsc_api.h | 4
-rw-r--r--  drivers/staging/hv/storvsc_drv.c | 11
-rw-r--r--  drivers/staging/octeon/Kconfig | 2
-rw-r--r--  drivers/staging/pohmelfs/path_entry.c | 8
-rw-r--r--  drivers/staging/rt2860/usb_main_dev.c | 41
-rw-r--r--  drivers/staging/sep/Kconfig | 10
-rw-r--r--  drivers/staging/sep/Makefile | 2
-rw-r--r--  drivers/staging/sep/TODO | 8
-rw-r--r--  drivers/staging/sep/sep_dev.h | 110
-rw-r--r--  drivers/staging/sep/sep_driver.c | 2742
-rw-r--r--  drivers/staging/sep/sep_driver_api.h | 425
-rw-r--r--  drivers/staging/sep/sep_driver_config.h | 225
-rw-r--r--  drivers/staging/sep/sep_driver_hw_defs.h | 232
-rw-r--r--  drivers/staging/spectra/Kconfig | 1
-rw-r--r--  drivers/staging/spectra/ffsport.c | 30
-rw-r--r--  drivers/staging/spectra/flash.c | 420
-rw-r--r--  drivers/staging/wlan-ng/cfg80211.c | 3
-rw-r--r--  drivers/staging/zram/zram_drv.c | 1
-rw-r--r--  drivers/usb/atm/cxacru.c | 24
-rw-r--r--  drivers/usb/class/cdc-acm.c | 23
-rw-r--r--  drivers/usb/core/message.c | 22
-rw-r--r--  drivers/usb/gadget/composite.c | 4
-rw-r--r--  drivers/usb/gadget/m66592-udc.c | 1
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 1
-rw-r--r--  drivers/usb/gadget/rndis.c | 12
-rw-r--r--  drivers/usb/gadget/rndis.h | 2
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 2
-rw-r--r--  drivers/usb/gadget/uvc_v4l2.c | 2
-rw-r--r--  drivers/usb/host/ehci-ppc-of.c | 12
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 6
-rw-r--r--  drivers/usb/misc/adutux.c | 2
-rw-r--r--  drivers/usb/misc/iowarrior.c | 4
-rw-r--r--  drivers/usb/otg/twl4030-usb.c | 6
-rw-r--r--  drivers/usb/serial/cp210x.c | 15
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 15
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 21
-rw-r--r--  drivers/usb/serial/generic.c | 11
-rw-r--r--  drivers/usb/serial/io_ti.c | 4
-rw-r--r--  drivers/usb/serial/mos7840.c | 32
-rw-r--r--  drivers/usb/serial/navman.c | 1
-rw-r--r--  drivers/usb/serial/option.c | 126
-rw-r--r--  drivers/usb/serial/pl2303.c | 3
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/serial/ssu100.c | 258
-rw-r--r--  drivers/usb/serial/usb-serial.c | 23
-rw-r--r--  drivers/vhost/vhost.c | 85
-rw-r--r--  drivers/video/amba-clcd.c | 10
-rw-r--r--  drivers/video/matrox/matroxfb_base.h | 4
-rw-r--r--  drivers/xen/events.c | 21
-rw-r--r--  drivers/xen/manage.c | 2
283 files changed, 5821 insertions, 9099 deletions
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1f67057af2a5..3ba8d1f44a73 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,7 +33,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
-#include <linux/pci-aspm.h>
 #include <linux/acpi.h>
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
@@ -226,22 +225,31 @@ static acpi_status acpi_pci_run_osc(acpi_handle handle,
 	return status;
 }
 
-static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 flags)
+static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
+				      u32 support,
+				      u32 *control)
 {
 	acpi_status status;
-	u32 support_set, result, capbuf[3];
+	u32 result, capbuf[3];
+
+	support &= OSC_PCI_SUPPORT_MASKS;
+	support |= root->osc_support_set;
 
-	/* do _OSC query for all possible controls */
-	support_set = root->osc_support_set | (flags & OSC_PCI_SUPPORT_MASKS);
 	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-	capbuf[OSC_SUPPORT_TYPE] = support_set;
-	capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	capbuf[OSC_SUPPORT_TYPE] = support;
+	if (control) {
+		*control &= OSC_PCI_CONTROL_MASKS;
+		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+	} else {
+		/* Run _OSC query for all possible controls. */
+		capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+	}
 
 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
 	if (ACPI_SUCCESS(status)) {
-		root->osc_support_set = support_set;
-		root->osc_control_qry = result;
-		root->osc_queried = 1;
+		root->osc_support_set = support;
+		if (control)
+			*control = result;
 	}
 	return status;
 }
@@ -255,7 +263,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 	if (ACPI_FAILURE(status))
 		return status;
 	mutex_lock(&osc_lock);
-	status = acpi_pci_query_osc(root, flags);
+	status = acpi_pci_query_osc(root, flags, NULL);
 	mutex_unlock(&osc_lock);
 	return status;
 }
@@ -365,55 +373,70 @@ out:
 EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 
 /**
- * acpi_pci_osc_control_set - commit requested control to Firmware
- * @handle: acpi_handle for the target ACPI object
- * @flags: driver's requested control bits
+ * acpi_pci_osc_control_set - Request control of PCI root _OSC features.
+ * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex).
+ * @mask: Mask of _OSC bits to request control of, place to store control mask.
+ * @req: Mask of _OSC bits the control of is essential to the caller.
+ *
+ * Run _OSC query for @mask and if that is successful, compare the returned
+ * mask of control bits with @req.  If all of the @req bits are set in the
+ * returned mask, run _OSC request for it.
  *
- * Attempt to take control from Firmware on requested control bits.
+ * The variable at the @mask address may be modified regardless of whether or
+ * not the function returns success.  On success it will contain the mask of
+ * _OSC bits the BIOS has granted control of, but its contents are meaningless
+ * on failure.
 **/
-acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags)
+acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
+	struct acpi_pci_root *root;
 	acpi_status status;
-	u32 control_req, result, capbuf[3];
+	u32 ctrl, capbuf[3];
 	acpi_handle tmp;
-	struct acpi_pci_root *root;
 
-	status = acpi_get_handle(handle, "_OSC", &tmp);
-	if (ACPI_FAILURE(status))
-		return status;
+	if (!mask)
+		return AE_BAD_PARAMETER;
 
-	control_req = (flags & OSC_PCI_CONTROL_MASKS);
-	if (!control_req)
+	ctrl = *mask & OSC_PCI_CONTROL_MASKS;
+	if ((ctrl & req) != req)
 		return AE_TYPE;
 
 	root = acpi_pci_find_root(handle);
 	if (!root)
 		return AE_NOT_EXIST;
 
+	status = acpi_get_handle(handle, "_OSC", &tmp);
+	if (ACPI_FAILURE(status))
+		return status;
+
 	mutex_lock(&osc_lock);
+
+	*mask = ctrl | root->osc_control_set;
 	/* No need to evaluate _OSC if the control was already granted. */
-	if ((root->osc_control_set & control_req) == control_req)
+	if ((root->osc_control_set & ctrl) == ctrl)
 		goto out;
 
-	/* Need to query controls first before requesting them */
-	if (!root->osc_queried) {
-		status = acpi_pci_query_osc(root, root->osc_support_set);
+	/* Need to check the available controls bits before requesting them. */
+	while (*mask) {
+		status = acpi_pci_query_osc(root, root->osc_support_set, mask);
 		if (ACPI_FAILURE(status))
 			goto out;
+		if (ctrl == *mask)
+			break;
+		ctrl = *mask;
 	}
-	if ((root->osc_control_qry & control_req) != control_req) {
-		printk(KERN_DEBUG
-		       "Firmware did not grant requested _OSC control\n");
+
+	if ((ctrl & req) != req) {
 		status = AE_SUPPORT;
 		goto out;
 	}
 
 	capbuf[OSC_QUERY_TYPE] = 0;
 	capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-	capbuf[OSC_CONTROL_TYPE] = root->osc_control_set | control_req;
-	status = acpi_pci_run_osc(handle, capbuf, &result);
+	capbuf[OSC_CONTROL_TYPE] = ctrl;
+	status = acpi_pci_run_osc(handle, capbuf, mask);
 	if (ACPI_SUCCESS(status))
-		root->osc_control_set = result;
+		root->osc_control_set = *mask;
 out:
 	mutex_unlock(&osc_lock);
 	return status;
@@ -544,14 +567,6 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
 	if (flags != base_flags)
 		acpi_pci_osc_support(root, flags);
 
-	status = acpi_pci_osc_control_set(root->device->handle,
-					  OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_INFO "Unable to assume PCIe control: Disabling ASPM\n");
-		pcie_no_aspm();
-	}
-
 	pci_acpi_add_bus_pm_notifier(device, root->bus);
 	if (device->wakeup.flags.run_wake)
 		device_set_run_wake(root->bus->bridge, true);
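
The hunks above change the calling convention of acpi_pci_osc_control_set(): a caller now passes the full mask of _OSC control bits it wants through @mask and names the subset it cannot work without in @req. A minimal usage sketch (hypothetical caller, not part of this patch; the OSC_PCI_EXPRESS_* constants come from include/linux/acpi.h):

	acpi_status status;
	u32 mask = OSC_PCI_EXPRESS_PME_CONTROL | OSC_PCI_EXPRESS_AER_CONTROL;

	/* Ask for both bits, but treat only PME control as essential. */
	status = acpi_pci_osc_control_set(handle, &mask,
					  OSC_PCI_EXPRESS_PME_CONTROL);
	if (ACPI_FAILURE(status))
		return -ENODEV;	/* firmware refused an essential bit */
	/* On success, mask holds every control bit the BIOS granted. */
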
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 65e3e2708371..11ec911016c6 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -828,6 +828,7 @@ config PATA_SAMSUNG_CF
 config PATA_WINBOND_VLB
 	tristate "Winbond W83759A VLB PATA support (Experimental)"
 	depends on ISA && EXPERIMENTAL
+	select PATA_LEGACY
 	help
 	  Support for the Winbond W83759A controller on Vesa Local Bus
 	  systems.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 158eaa961b1e..d5df04a395ca 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -89,7 +89,6 @@ obj-$(CONFIG_PATA_QDI)		+= pata_qdi.o
 obj-$(CONFIG_PATA_RB532)	+= pata_rb532_cf.o
 obj-$(CONFIG_PATA_RZ1000)	+= pata_rz1000.o
 obj-$(CONFIG_PATA_SAMSUNG_CF)	+= pata_samsung_cf.o
-obj-$(CONFIG_PATA_WINBOND_VLB)	+= pata_winbond.o
 
 obj-$(CONFIG_PATA_PXA)		+= pata_pxa.o
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index fe75d8befc3a..013727b20417 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
 	board_ahci,
 	board_ahci_ign_iferr,
 	board_ahci_nosntf,
+	board_ahci_yes_fbs,
 
 	/* board IDs for specific chipsets in alphabetical order */
 	board_ahci_mcp65,
@@ -132,6 +133,14 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
+	[board_ahci_yes_fbs] =
+	{
+		AHCI_HFLAGS	(AHCI_HFLAG_YES_FBS),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 	/* by chipsets */
 	[board_ahci_mcp65] =
 	{
@@ -362,6 +371,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	/* Marvell */
 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
+	{ PCI_DEVICE(0x1b4b, 0x9123),
+	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
 
 	/* Promise */
 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 7113c5724471..474427b6f99f 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -209,6 +209,7 @@ enum {
 					 link offline */
 	AHCI_HFLAG_NO_SNTF		= (1 << 12), /* no sntf */
 	AHCI_HFLAG_NO_FPDMA_AA		= (1 << 13), /* no FPDMA AA */
+	AHCI_HFLAG_YES_FBS		= (1 << 14), /* force FBS cap on */
 
 	/* ap->flags bits */
 
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 81e772a94d59..666850d31df2 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -430,6 +430,12 @@ void ahci_save_initial_config(struct device *dev,
 		cap &= ~HOST_CAP_SNTF;
 	}
 
+	if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
+		dev_printk(KERN_INFO, dev,
+			   "controller can do FBS, turning on CAP_FBS\n");
+		cap |= HOST_CAP_FBS;
+	}
+
 	if (force_port_map && port_map != force_port_map) {
 		dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
 			   port_map, force_port_map);
@@ -2036,9 +2042,15 @@ static int ahci_port_start(struct ata_port *ap)
 		u32 cmd = readl(port_mmio + PORT_CMD);
 		if (cmd & PORT_CMD_FBSCP)
 			pp->fbs_supported = true;
-		else
+		else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
+			dev_printk(KERN_INFO, dev,
+				   "port %d can do FBS, forcing FBSCP\n",
+				   ap->port_no);
+			pp->fbs_supported = true;
+		} else
 			dev_printk(KERN_WARNING, dev,
-				   "The port is not capable of FBS\n");
+				   "port %d is not capable of FBS\n",
+				   ap->port_no);
 	}
 
 	if (pp->fbs_supported) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f216fa..c035b3d041ee 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5111,15 +5111,18 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	qc->flags |= ATA_QCFLAG_ACTIVE;
 	ap->qc_active |= 1 << qc->tag;
 
-	/* We guarantee to LLDs that they will have at least one
+	/*
+	 * We guarantee to LLDs that they will have at least one
 	 * non-zero sg if the command is a data command.
 	 */
-	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+	if (WARN_ON_ONCE(ata_is_data(prot) &&
+			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
+		goto sys_err;
 
 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
 				 (ap->flags & ATA_FLAG_PIO_DMA)))
 		if (ata_sg_setup(qc))
-			goto sg_err;
+			goto sys_err;
 
 	/* if device is sleeping, schedule reset and abort the link */
 	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
@@ -5136,7 +5139,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 		goto err;
 	return;
 
-sg_err:
+sys_err:
 	qc->err_mask |= AC_ERR_SYSTEM;
 err:
 	ata_qc_complete(qc);
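
One detail the hunk above relies on: WARN_ON_ONCE() evaluates to the truth value of its condition, so the driver-bug check can gate the error path directly and fail just the one command instead of BUG()ing the whole machine. A stripped-down sketch of the pattern (simplified illustration, not the libata code itself):

	if (WARN_ON_ONCE(ata_is_data(prot) && !qc->n_elem)) {
		/* log the driver bug once, then fail only this command */
		qc->err_mask |= AC_ERR_SYSTEM;
		ata_qc_complete(qc);
		return;
	}
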
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c1436491f..3b82d8ef76f0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2735,10 +2735,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
-	/* see ata_dma_blacklisted() */
-	BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) &&
-	       qc->tf.protocol == ATAPI_PROT_DMA);
-
 	/* defer PIO handling to sff_qc_issue */
 	if (!ata_is_dma(qc->tf.protocol))
 		return ata_sff_qc_issue(qc);
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 9f5da1c7454b..905ff76d3cbb 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
 
 	if (pair) {
 		struct ata_timing tp;
-
 		ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
 		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
-		if (pair->dma_mode) {
-			ata_timing_compute(pair, pair->dma_mode,
-					   &tp, T, 0);
-			ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP);
-		}
 	}
 }
 
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 9df1ff7e1eaa..eaf194138f21 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -44,6 +44,9 @@
  *  Specific support is included for the ht6560a/ht6560b/opti82c611a/
  *  opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
  *
+ *  Support for the Winbond 83759A when operating in advanced mode.
+ *  Multichip mode is not currently supported.
+ *
  *  Use the autospeed and pio_mask options with:
  *	Appian ADI/2 aka CLPD7220 or AIC25VL01.
  *  Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
@@ -135,12 +138,18 @@ static int ht6560b;		/* HT 6560A on primary 1, second 2, both 3 */
 static int opti82c611a;		/* Opti82c611A on primary 1, sec 2, both 3 */
 static int opti82c46x;		/* Opti 82c465MV present(pri/sec autodetect) */
 static int qdi;			/* Set to probe QDI controllers */
-static int winbond;		/* Set to probe Winbond controllers,
-					give I/O port if non standard */
 static int autospeed;		/* Chip present which snoops speed changes */
 static int pio_mask = ATA_PIO4;	/* PIO range for autospeed devices */
 static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
 
+#ifdef PATA_WINBOND_VLB_MODULE
+static int winbond = 1;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#else
+static int winbond;		/* Set to probe Winbond controllers,
+					give I/O port if non standard */
+#endif
+
 /**
  *	legacy_probe_add	-	Add interface to probe list
  *	@port: Controller port
@@ -1297,6 +1306,7 @@ MODULE_AUTHOR("Alan Cox");
 MODULE_DESCRIPTION("low-level driver for legacy ATA");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_winbond");
 
 module_param(probe_all, int, 0);
 module_param(autospeed, int, 0);
@@ -1305,6 +1315,7 @@ module_param(ht6560b, int, 0);
 module_param(opti82c611a, int, 0);
 module_param(opti82c46x, int, 0);
 module_param(qdi, int, 0);
+module_param(winbond, int, 0);
 module_param(pio_mask, int, 0);
 module_param(iordy_mask, int, 0);
 
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
deleted file mode 100644
index 6d8619b6f670..000000000000
--- a/drivers/ata/pata_winbond.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *    pata_winbond.c - Winbond VLB ATA controllers
- *	(C) 2006 Red Hat
- *
- *    Support for the Winbond 83759A when operating in advanced mode.
- *    Multichip mode is not currently supported.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_winbond"
-#define DRV_VERSION "0.0.3"
-
-#define NR_HOST 4	/* Two winbond controllers, two channels each */
-
-struct winbond_data {
-	unsigned long config;
-	struct platform_device *platform_dev;
-};
-
-static struct ata_host *winbond_host[NR_HOST];
-static struct winbond_data winbond_data[NR_HOST];
-static int nr_winbond_host;
-
-#ifdef MODULE
-static int probe_winbond = 1;
-#else
-static int probe_winbond;
-#endif
-
-static DEFINE_SPINLOCK(winbond_lock);
-
-static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
-{
-	unsigned long flags;
-	spin_lock_irqsave(&winbond_lock, flags);
-	outb(reg, port + 0x01);
-	outb(val, port + 0x02);
-	spin_unlock_irqrestore(&winbond_lock, flags);
-}
-
-static u8 winbond_readcfg(unsigned long port, u8 reg)
-{
-	u8 val;
-
-	unsigned long flags;
-	spin_lock_irqsave(&winbond_lock, flags);
-	outb(reg, port + 0x01);
-	val = inb(port + 0x02);
-	spin_unlock_irqrestore(&winbond_lock, flags);
-
-	return val;
-}
-
-static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
-	struct ata_timing t;
-	struct winbond_data *winbond = ap->host->private_data;
-	int active, recovery;
-	u8 reg;
-	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);
-
-	reg = winbond_readcfg(winbond->config, 0x81);
-
-	/* Get the timing data in cycles */
-	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
-		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
-	else
-		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
-	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
-	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
-	timing = (active << 4) | recovery;
-	winbond_writecfg(winbond->config, timing, reg);
-
-	/* Load the setup timing */
-
-	reg = 0x35;
-	if (adev->class != ATA_DEV_ATA)
-		reg |= 0x08;	/* FIFO off */
-	if (!ata_pio_need_iordy(adev))
-		reg |= 0x02;	/* IORDY off */
-	reg |= (clamp_val(t.setup, 0, 3) << 6);
-	winbond_writecfg(winbond->config, timing + 1, reg);
-}
-
-
-static unsigned int winbond_data_xfer(struct ata_device *dev,
-			unsigned char *buf, unsigned int buflen, int rw)
-{
-	struct ata_port *ap = dev->link->ap;
-	int slop = buflen & 3;
-
-	if (ata_id_has_dword_io(dev->id)) {
-		if (rw == READ)
-			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-		else
-			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
-		if (unlikely(slop)) {
-			__le32 pad;
-			if (rw == READ) {
-				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
-				memcpy(buf + buflen - slop, &pad, slop);
-			} else {
-				memcpy(&pad, buf + buflen - slop, slop);
-				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
-			}
-			buflen += 4 - slop;
-		}
-	} else
-		buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
-	return buflen;
-}
-
-static struct scsi_host_template winbond_sht = {
-	ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations winbond_port_ops = {
-	.inherits	= &ata_sff_port_ops,
-	.sff_data_xfer	= winbond_data_xfer,
-	.cable_detect	= ata_cable_40wire,
-	.set_piomode	= winbond_set_piomode,
-};
-
-/**
- *	winbond_init_one		-	attach a winbond interface
- *	@type: Type to display
- *	@io: I/O port start
- *	@irq: interrupt line
- *	@fast: True if on a > 33Mhz VLB
- *
- *	Register a VLB bus IDE interface. Such interfaces are PIO and we
- *	assume do not support IRQ sharing.
- */
-
-static __init int winbond_init_one(unsigned long port)
-{
-	struct platform_device *pdev;
-	u8 reg;
-	int i, rc;
-
-	reg = winbond_readcfg(port, 0x81);
-	reg |= 0x80;	/* jumpered mode off */
-	winbond_writecfg(port, 0x81, reg);
-	reg = winbond_readcfg(port, 0x83);
-	reg |= 0xF0;	/* local control */
-	winbond_writecfg(port, 0x83, reg);
-	reg = winbond_readcfg(port, 0x85);
-	reg |= 0xF0;	/* programmable timing */
-	winbond_writecfg(port, 0x85, reg);
-
-	reg = winbond_readcfg(port, 0x81);
-
-	if (!(reg & 0x03))	/* Disabled */
-		return -ENODEV;
-
-	for (i = 0; i < 2 ; i ++) {
-		unsigned long cmd_port = 0x1F0 - (0x80 * i);
-		unsigned long ctl_port = cmd_port + 0x206;
-		struct ata_host *host;
-		struct ata_port *ap;
-		void __iomem *cmd_addr, *ctl_addr;
-
-		if (!(reg & (1 << i)))
-			continue;
-
-		pdev = platform_device_register_simple(DRV_NAME, nr_winbond_host, NULL, 0);
-		if (IS_ERR(pdev))
-			return PTR_ERR(pdev);
-
-		rc = -ENOMEM;
-		host = ata_host_alloc(&pdev->dev, 1);
-		if (!host)
-			goto err_unregister;
-		ap = host->ports[0];
-
-		rc = -ENOMEM;
-		cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
-		ctl_addr = devm_ioport_map(&pdev->dev, ctl_port, 1);
-		if (!cmd_addr || !ctl_addr)
-			goto err_unregister;
-
-		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", cmd_port, ctl_port);
-
-		ap->ops = &winbond_port_ops;
-		ap->pio_mask = ATA_PIO4;
-		ap->flags |= ATA_FLAG_SLAVE_POSS;
-		ap->ioaddr.cmd_addr = cmd_addr;
-		ap->ioaddr.altstatus_addr = ctl_addr;
-		ap->ioaddr.ctl_addr = ctl_addr;
-		ata_sff_std_ports(&ap->ioaddr);
-
-		/* hook in a private data structure per channel */
-		host->private_data = &winbond_data[nr_winbond_host];
-		winbond_data[nr_winbond_host].config = port;
-		winbond_data[nr_winbond_host].platform_dev = pdev;
-
-		/* activate */
-		rc = ata_host_activate(host, 14 + i, ata_sff_interrupt, 0,
-				       &winbond_sht);
-		if (rc)
-			goto err_unregister;
-
-		winbond_host[nr_winbond_host++] = dev_get_drvdata(&pdev->dev);
-	}
-
-	return 0;
-
- err_unregister:
-	platform_device_unregister(pdev);
-	return rc;
-}
-
-/**
- *	winbond_init		-	attach winbond interfaces
- *
- *	Attach winbond IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int winbond_init(void)
-{
-	static const unsigned long config[2] = { 0x130, 0x1B0 };
-
-	int ct = 0;
-	int i;
-
-	if (probe_winbond == 0)
-		return -ENODEV;
-
-	/*
-	 *	Check both base addresses
-	 */
-
-	for (i = 0; i < 2; i++) {
-		if (probe_winbond & (1<<i)) {
-			int ret = 0;
-			unsigned long port = config[i];
-
-			if (request_region(port, 2, "pata_winbond")) {
-				ret = winbond_init_one(port);
-				if (ret <= 0)
-					release_region(port, 2);
-				else ct+= ret;
-			}
-		}
-	}
-	if (ct != 0)
-		return 0;
-	return -ENODEV;
-}
-
-static __exit void winbond_exit(void)
-{
-	int i;
-
-	for (i = 0; i < nr_winbond_host; i++) {
-		ata_host_detach(winbond_host[i]);
-		release_region(winbond_data[i].config, 2);
-		platform_device_unregister(winbond_data[i].platform_dev);
-	}
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for Winbond VL ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(winbond_init);
-module_exit(winbond_exit);
-
-module_param(probe_winbond, int, 0);
-
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index ea24c1e51be2..6cf57c5c2b5f 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1459,7 +1459,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
 {
 	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
-	u32 dma_chan;
+	int dma_chan;
 	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 	int err;
@@ -1588,7 +1588,7 @@ static const struct ata_port_info sata_dwc_port_info[] = {
 	},
 };
 
-static int sata_dwc_probe(struct of_device *ofdev,
+static int sata_dwc_probe(struct platform_device *ofdev,
 			  const struct of_device_id *match)
 {
 	struct sata_dwc_device *hsdev;
@@ -1702,7 +1702,7 @@ error_out:
 	return err;
 }
 
-static int sata_dwc_remove(struct of_device *ofdev)
+static int sata_dwc_remove(struct platform_device *ofdev)
 {
 	struct device *dev = &ofdev->dev;
 	struct ata_host *host = dev_get_drvdata(dev);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38e..81982594a014 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
  * LOCKING:
  *      Inherited from caller.
  */
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 cmd;
 
 	/* clear start/stop bit */
 	cmd = readl(port_mmio + BMDMA_CMD);
-	cmd &= ~ATA_DMA_START;
-	writelfl(cmd, port_mmio + BMDMA_CMD);
+	if (cmd & ATA_DMA_START) {
+		cmd &= ~ATA_DMA_START;
+		writelfl(cmd, port_mmio + BMDMA_CMD);
 
-	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_dma_pause(ap);
+		/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+		ata_sff_dma_pause(ap);
+	}
+}
+
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	mv_bmdma_stop_ap(qc->ap);
 }
 
 /**
@@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
 	reg = readl(port_mmio + BMDMA_STATUS);
 	if (reg & ATA_DMA_ACTIVE)
 		status = ATA_DMA_ACTIVE;
-	else
+	else if (reg & ATA_DMA_ERR)
 		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+	else {
+		/*
+		 * Just because DMA_ACTIVE is 0 (DMA completed),
+		 * this does _not_ mean the device is "done".
+		 * So we should not yet be signalling ATA_DMA_INTR
+		 * in some cases.  Eg. DSM/TRIM, and perhaps others.
+		 */
+		mv_bmdma_stop_ap(ap);
+		if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+			status = 0;
+		else
+			status = ATA_DMA_INTR;
+	}
 	return status;
 }
 
@@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 
 	switch (tf->protocol) {
 	case ATA_PROT_DMA:
+		if (tf->command == ATA_CMD_DSM)
+			return;
+		/* fall-thru */
 	case ATA_PROT_NCQ:
 		break;	/* continue below */
 	case ATA_PROT_PIO:
@@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	if ((tf->protocol != ATA_PROT_DMA) &&
 	    (tf->protocol != ATA_PROT_NCQ))
 		return;
+	if (tf->command == ATA_CMD_DSM)
+		return;	/* use bmdma for this */
 
 	/* Fill in Gen IIE command request block */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_DMA:
+		if (qc->tf.command == ATA_CMD_DSM) {
+			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
+				return AC_ERR_OTHER;
+			break;  /* use bmdma for this */
+		}
+		/* fall thru */
 	case ATA_PROT_NCQ:
 		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
 		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index c8a44f5e0584..40af43ebd92d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -568,7 +568,7 @@ static int _request_firmware(const struct firmware **firmware_p,
 out:
 	if (retval) {
 		release_firmware(firmware);
-		firmware_p = NULL;
+		*firmware_p = NULL;
 	}
 
 	return retval;
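
The one-character fix above is the classic pointer-to-pointer slip: assigning NULL to the parameter itself only clears the callee's local copy, so the caller was left holding a pointer to already-released firmware. A self-contained userspace demo of the bug class (hypothetical names, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	struct firmware { int blob; };

	static int request_buggy(struct firmware **fw_p)
	{
		*fw_p = malloc(sizeof(**fw_p));
		free(*fw_p);		/* stands in for release_firmware() */
		fw_p = NULL;		/* BUG: only the local parameter changes */
		return -1;
	}

	static int request_fixed(struct firmware **fw_p)
	{
		*fw_p = malloc(sizeof(**fw_p));
		free(*fw_p);
		*fw_p = NULL;		/* FIX: the caller's pointer is cleared */
		return -1;
	}

	int main(void)
	{
		struct firmware *fw;

		request_buggy(&fw);
		printf("buggy: fw = %p (dangling)\n", (void *)fw);
		request_fixed(&fw);
		printf("fixed: fw = %p\n", (void *)fw);
		return 0;
	}
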
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ac1b682edecb..ab735a605cf3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -834,7 +834,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 	char *type;
 	int len;
 	/* no unplug has been done: do not hook devices != xen vbds */
-	if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) {
+	if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
 		int major;
 
 		if (!VDEV_IS_EXTENDED(vdevice))
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 2982b3ee9465..057413bb16e2 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -94,6 +94,7 @@
 #include <linux/hdreg.h>
 #include <linux/platform_device.h>
 #if defined(CONFIG_OF)
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index ddf5def1b0da..eab58db5f91c 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
+#include <linux/intel-gtt.h>
 
 #include "intel-gtt.c"
 
@@ -815,9 +816,19 @@ static const struct intel_driver_description {
 	    "HD Graphics", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
 	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
+	    "Sandybridge", NULL, &intel_gen6_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
 	    "Sandybridge", NULL, &intel_gen6_driver },
 	{ 0, 0, NULL, NULL, NULL }
 };
@@ -825,7 +836,8 @@ static const struct intel_driver_description {
 static int __devinit intel_gmch_probe(struct pci_dev *pdev,
 				      struct agp_bridge_data *bridge)
 {
-	int i;
+	int i, mask;
+
 	bridge->driver = NULL;
 
 	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -845,14 +857,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev,
 
 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
-	if (bridge->driver->mask_memory == intel_i965_mask_memory) {
-		if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
-			dev_err(&intel_private.pcidev->dev,
-				"set gfx device dma mask 36bit failed!\n");
-		else
-			pci_set_consistent_dma_mask(intel_private.pcidev,
-						    DMA_BIT_MASK(36));
-	}
+	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+		mask = 40;
+	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+		mask = 36;
+	else
+		mask = 32;
+
+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		dev_err(&intel_private.pcidev->dev,
+			"set gfx device dma mask %d-bit failed!\n", mask);
+	else
+		pci_set_consistent_dma_mask(intel_private.pcidev,
+					    DMA_BIT_MASK(mask));
 
 	return 1;
 }
@@ -1036,6 +1053,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
 	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
 	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
 	{ }
 };
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c05e3e518268..ee189c74d345 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -1,6 +1,8 @@
 /*
  * Common Intel AGPGART and GTT definitions.
  */
+#ifndef _INTEL_AGP_H
+#define _INTEL_AGP_H
 
 /* Intel registers */
 #define INTEL_APSIZE	0xb4
@@ -200,10 +202,16 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB	0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB		0x0100
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG		0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB		0x0104
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG		0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB		0x0100	/* Desktop */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG		0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG		0x0112
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG	0x0122
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB		0x0104	/* Mobile */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG	0x0106
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG	0x0116
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG	0x0126
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108	/* Server */
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -230,7 +238,8 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
 #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
 
 #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@@ -243,3 +252,5 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
 		IS_SNB)
+
+#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb811bf2..75e0a3497888 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -49,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] =
49 .type = INTEL_AGP_CACHED_MEMORY} 49 .type = INTEL_AGP_CACHED_MEMORY}
50}; 50};
51 51
52#define INTEL_AGP_UNCACHED_MEMORY 0
53#define INTEL_AGP_CACHED_MEMORY_LLC 1
54#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
55#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
56#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
57
58static struct gatt_mask intel_gen6_masks[] =
59{
60 {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
61 .type = INTEL_AGP_UNCACHED_MEMORY },
62 {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
63 .type = INTEL_AGP_CACHED_MEMORY_LLC },
64 {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
65 .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
66 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
67 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
68 {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
69 .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
70};
71
52static struct _intel_private { 72static struct _intel_private {
53 struct pci_dev *pcidev; /* device one */ 73 struct pci_dev *pcidev; /* device one */
54 u8 __iomem *registers; 74 u8 __iomem *registers;
@@ -178,13 +198,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
178 off_t pg_start, int mask_type) 198 off_t pg_start, int mask_type)
179{ 199{
180 int i, j; 200 int i, j;
181 u32 cache_bits = 0;
182
183 if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
184 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
185 {
186 cache_bits = GEN6_PTE_LLC_MLC;
187 }
188 201
189 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 202 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
190 writel(agp_bridge->driver->mask_memory(agp_bridge, 203 writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -317,6 +330,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
317 return 0; 330 return 0;
318} 331}
319 332
333static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
334 int type)
335{
336 unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
337 unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
338
339 if (type_mask == AGP_USER_UNCACHED_MEMORY)
340 return INTEL_AGP_UNCACHED_MEMORY;
341 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
342 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
343 INTEL_AGP_CACHED_MEMORY_LLC_MLC;
344 else /* set 'normal'/'cached' to LLC by default */
345 return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
346 INTEL_AGP_CACHED_MEMORY_LLC;
347}
348
349
320static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 350static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
321 int type) 351 int type)
322{ 352{
@@ -588,8 +618,7 @@ static void intel_i830_init_gtt_entries(void)
588 gtt_entries = 0; 618 gtt_entries = 0;
589 break; 619 break;
590 } 620 }
591 } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || 621 } else if (IS_SNB) {
592 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
593 /* 622 /*
594 * SandyBridge has new memory control reg at 0x50.w 623 * SandyBridge has new memory control reg at 0x50.w
595 */ 624 */
@@ -1068,11 +1097,11 @@ static void intel_i9xx_setup_flush(void)
1068 intel_i915_setup_chipset_flush(); 1097 intel_i915_setup_chipset_flush();
1069 } 1098 }
1070 1099
1071 if (intel_private.ifp_resource.start) { 1100 if (intel_private.ifp_resource.start)
1072 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); 1101 intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
1073 if (!intel_private.i9xx_flush_page) 1102 if (!intel_private.i9xx_flush_page)
1074 dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); 1103 dev_err(&intel_private.pcidev->dev,
1075 } 1104 "can't ioremap flush page - no chipset flushing\n");
1076} 1105}
1077 1106
1078static int intel_i9xx_configure(void) 1107static int intel_i9xx_configure(void)
@@ -1163,7 +1192,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
 
 	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
 
-	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+	if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
 	    mask_type != INTEL_AGP_CACHED_MEMORY)
 		goto out_err;
 
@@ -1333,8 +1362,8 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
 static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge,
 					    dma_addr_t addr, int type)
 {
-	/* Shift high bits down */
-	addr |= (addr >> 28) & 0xff;
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
 
 	/* Type checking must be done elsewhere */
 	return addr | bridge->driver->masks[type].mask;
@@ -1359,6 +1388,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 		break;
 	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
 	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
+	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB:
 		*gtt_offset = MB(2);
 
 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -1563,7 +1593,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
 	.fetch_size = intel_i9xx_fetch_size,
 	.cleanup = intel_i915_cleanup,
 	.mask_memory = intel_gen6_mask_memory,
-	.masks = intel_i810_masks,
+	.masks = intel_gen6_masks,
 	.agp_enable = intel_i810_agp_enable,
 	.cache_flush = global_cache_flush,
 	.create_gatt_table = intel_i965_create_gatt_table,
@@ -1576,7 +1606,7 @@ static const struct agp_bridge_driver intel_gen6_driver = {
 	.agp_alloc_pages = agp_generic_alloc_pages,
 	.agp_destroy_page = agp_generic_destroy_page,
 	.agp_destroy_pages = agp_generic_destroy_pages,
-	.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+	.agp_type_to_mask_type = intel_gen6_type_to_mask_type,
 	.chipset_flush = intel_i915_chipset_flush,
 #ifdef USE_PCI_DMA_API
 	.agp_map_page = intel_agp_map_page,
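
The mask_memory fix above is the subtle one: a gen6/Sandybridge GTT entry keeps address bits 31:12 in place and stores physical address bits 39:32 in PTE bits 11:4, so the old mask of 0xff dropped the high bits onto the valid/cache-control bits instead. A minimal user-space sketch of the corrected folding (the sample address is invented):

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a 40-bit physical address into a gen6 GTT PTE:
	 * bits 31:12 stay in place, bits 39:32 move down to PTE bits 11:4.
	 */
	static uint32_t gen6_fold_addr(uint64_t addr)
	{
		uint32_t pte = (uint32_t)addr;	/* keeps bits 31:12 */

		pte |= (addr >> 28) & 0xff0;	/* addr[39:32] -> pte[11:4] */
		return pte;
	}

	int main(void)
	{
		/* 0x12 in address bits 39:32 ends up as 0x120 in the PTE */
		printf("pte = 0x%08x\n", (unsigned int)gen6_fold_addr(0x12abcde000ULL));
		return 0;
	}
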
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index e0249722d25f..f953c96efc86 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -159,7 +159,7 @@ static void hangcheck_fire(unsigned long data)
 	if (hangcheck_dump_tasks) {
 		printk(KERN_CRIT "Hangcheck: Task state:\n");
 #ifdef CONFIG_MAGIC_SYSRQ
-		handle_sysrq('t', NULL);
+		handle_sysrq('t');
 #endif  /* CONFIG_MAGIC_SYSRQ */
 	}
 	if (hangcheck_reboot) {
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index fa27d1676ee5..3afd62e856eb 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -651,7 +651,7 @@ int hvc_poll(struct hvc_struct *hp)
 				if (sysrq_pressed)
 					continue;
 			} else if (sysrq_pressed) {
-				handle_sysrq(buf[i], tty);
+				handle_sysrq(buf[i]);
 				sysrq_pressed = 0;
 				continue;
 			}
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 1f4b6de65a2d..a2bc885ce60a 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -403,7 +403,7 @@ static void hvsi_insert_chars(struct hvsi_struct *hp, const char *buf, int len)
 			hp->sysrq = 1;
 			continue;
 		} else if (hp->sysrq) {
-			handle_sysrq(c, hp->tty);
+			handle_sysrq(c);
 			hp->sysrq = 0;
 			continue;
 		}
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index 1acdb2509511..a3f5e381e746 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -387,7 +387,7 @@ static int n2rng_init_control(struct n2rng *np)
 
 static int n2rng_data_read(struct hwrng *rng, u32 *data)
 {
-	struct n2rng *np = rng->priv;
+	struct n2rng *np = (struct n2rng *) rng->priv;
 	unsigned long ra = __pa(&np->test_data);
 	int len;
 
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 07f3ea38b582..d4b71e8d0d23 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -1650,7 +1650,7 @@ ip2_close( PTTY tty, struct file *pFile )
 	/* disable DSS reporting */
 	i2QueueCommands(PTYPE_INLINE, pCh, 100, 4,
 				CMD_DCD_NREP, CMD_CTS_NREP, CMD_DSR_NREP, CMD_RI_NREP);
-	if ( !tty || (tty->termios->c_cflag & HUPCL) ) {
+	if (tty->termios->c_cflag & HUPCL) {
 		i2QueueCommands(PTYPE_INLINE, pCh, 100, 2, CMD_RTSDN, CMD_DTRDN);
 		pCh->dataSetOut &= ~(I2_DTR | I2_RTS);
 		i2QueueCommands( PTYPE_INLINE, pCh, 100, 1, CMD_PAUSE(25));
@@ -2930,6 +2930,8 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg )
 	if ( pCh )
 	{
 		rc = copy_to_user(argp, pCh, sizeof(i2ChanStr));
+		if (rc)
+			rc = -EFAULT;
 	} else {
 		rc = -ENODEV;
 	}
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index ad46eae1f9bb..c350d01716bd 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -675,8 +675,8 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 	}
 
 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-	filp->private_data = tty;
-	file_move(filp, &tty->tty_files);
+
+	tty_add_file(tty, filp);
 
 	retval = devpts_pty_new(inode, tty->link);
 	if (retval)
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index 79c3bc69165a..7c79d243acc9 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -1244,6 +1244,7 @@ static int set_config(struct tty_struct *tty, struct r_port *info,
 	}
 	info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK));
 	configure_r_port(tty, info, NULL);
+	mutex_unlock(&info->port.mutex);
 	return 0;
 }
 
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index fef80cfcab5c..e63b830c86cc 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -691,8 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
 	if (info->port.count == 1) {
 		/* 1st open on this device, init hardware */
 		retval = startup(info);
-		if (retval < 0)
+		if (retval < 0) {
+			mutex_unlock(&info->port.mutex);
 			goto cleanup;
+		}
 	}
 	mutex_unlock(&info->port.mutex);
 	retval = block_til_ready(tty, filp, info);
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c
index 878ac0c2cc68..ef31bb81e843 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/char/sysrq.c
@@ -18,7 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
-#include <linux/tty.h>
 #include <linux/mount.h>
 #include <linux/kdev_t.h>
 #include <linux/major.h>
@@ -76,7 +75,7 @@ static int __init sysrq_always_enabled_setup(char *str)
 __setup("sysrq_always_enabled", sysrq_always_enabled_setup);
 
 
-static void sysrq_handle_loglevel(int key, struct tty_struct *tty)
+static void sysrq_handle_loglevel(int key)
 {
 	int i;
 
@@ -93,7 +92,7 @@ static struct sysrq_key_op sysrq_loglevel_op = {
 };
 
 #ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key, struct tty_struct *tty)
+static void sysrq_handle_SAK(int key)
 {
 	struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
 	schedule_work(SAK_work);
@@ -109,7 +108,7 @@ static struct sysrq_key_op sysrq_SAK_op = {
 #endif
 
 #ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key, struct tty_struct *tty)
+static void sysrq_handle_unraw(int key)
 {
 	struct kbd_struct *kbd = &kbd_table[fg_console];
 
@@ -126,7 +125,7 @@ static struct sysrq_key_op sysrq_unraw_op = {
 #define sysrq_unraw_op (*(struct sysrq_key_op *)NULL)
 #endif /* CONFIG_VT */
 
-static void sysrq_handle_crash(int key, struct tty_struct *tty)
+static void sysrq_handle_crash(int key)
 {
 	char *killer = NULL;
 
@@ -141,7 +140,7 @@ static struct sysrq_key_op sysrq_crash_op = {
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_reboot(int key, struct tty_struct *tty)
+static void sysrq_handle_reboot(int key)
 {
 	lockdep_off();
 	local_irq_enable();
@@ -154,7 +153,7 @@ static struct sysrq_key_op sysrq_reboot_op = {
 	.enable_mask	= SYSRQ_ENABLE_BOOT,
 };
 
-static void sysrq_handle_sync(int key, struct tty_struct *tty)
+static void sysrq_handle_sync(int key)
 {
 	emergency_sync();
 }
@@ -165,7 +164,7 @@ static struct sysrq_key_op sysrq_sync_op = {
 	.enable_mask	= SYSRQ_ENABLE_SYNC,
 };
 
-static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
+static void sysrq_handle_show_timers(int key)
 {
 	sysrq_timer_list_show();
 }
@@ -176,7 +175,7 @@ static struct sysrq_key_op sysrq_show_timers_op = {
 	.action_msg	= "Show clockevent devices & pending hrtimers (no others)",
 };
 
-static void sysrq_handle_mountro(int key, struct tty_struct *tty)
+static void sysrq_handle_mountro(int key)
 {
 	emergency_remount();
 }
@@ -188,7 +187,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
 };
 
 #ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key, struct tty_struct *tty)
+static void sysrq_handle_showlocks(int key)
 {
 	debug_show_all_locks();
 }
@@ -226,7 +225,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
 
 static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
 
-static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+static void sysrq_handle_showallcpus(int key)
 {
 	/*
 	 * Fall back to the workqueue based printing if the
@@ -252,7 +251,7 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
 };
 #endif
 
-static void sysrq_handle_showregs(int key, struct tty_struct *tty)
+static void sysrq_handle_showregs(int key)
 {
 	struct pt_regs *regs = get_irq_regs();
 	if (regs)
@@ -266,7 +265,7 @@ static struct sysrq_key_op sysrq_showregs_op = {
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate(int key)
 {
 	show_state();
 }
@@ -277,7 +276,7 @@ static struct sysrq_key_op sysrq_showstate_op = {
 	.enable_mask	= SYSRQ_ENABLE_DUMP,
 };
 
-static void sysrq_handle_showstate_blocked(int key, struct tty_struct *tty)
+static void sysrq_handle_showstate_blocked(int key)
 {
 	show_state_filter(TASK_UNINTERRUPTIBLE);
 }
@@ -291,7 +290,7 @@ static struct sysrq_key_op sysrq_showstate_blocked_op = {
 #ifdef CONFIG_TRACING
 #include <linux/ftrace.h>
 
-static void sysrq_ftrace_dump(int key, struct tty_struct *tty)
+static void sysrq_ftrace_dump(int key)
 {
 	ftrace_dump(DUMP_ALL);
 }
@@ -305,7 +304,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
 #define sysrq_ftrace_dump_op (*(struct sysrq_key_op *)NULL)
 #endif
 
-static void sysrq_handle_showmem(int key, struct tty_struct *tty)
+static void sysrq_handle_showmem(int key)
 {
 	show_mem();
 }
@@ -330,7 +329,7 @@ static void send_sig_all(int sig)
 	}
 }
 
-static void sysrq_handle_term(int key, struct tty_struct *tty)
+static void sysrq_handle_term(int key)
 {
 	send_sig_all(SIGTERM);
 	console_loglevel = 8;
@@ -349,7 +348,7 @@ static void moom_callback(struct work_struct *ignored)
 
 static DECLARE_WORK(moom_work, moom_callback);
 
-static void sysrq_handle_moom(int key, struct tty_struct *tty)
+static void sysrq_handle_moom(int key)
 {
 	schedule_work(&moom_work);
 }
@@ -361,7 +360,7 @@ static struct sysrq_key_op sysrq_moom_op = {
 };
 
 #ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key, struct tty_struct *tty)
+static void sysrq_handle_thaw(int key)
 {
 	emergency_thaw_all();
 }
@@ -373,7 +372,7 @@ static struct sysrq_key_op sysrq_thaw_op = {
 };
 #endif
 
-static void sysrq_handle_kill(int key, struct tty_struct *tty)
+static void sysrq_handle_kill(int key)
 {
 	send_sig_all(SIGKILL);
 	console_loglevel = 8;
@@ -385,7 +384,7 @@ static struct sysrq_key_op sysrq_kill_op = {
 	.enable_mask	= SYSRQ_ENABLE_SIGNAL,
 };
 
-static void sysrq_handle_unrt(int key, struct tty_struct *tty)
+static void sysrq_handle_unrt(int key)
 {
 	normalize_rt_tasks();
 }
@@ -493,7 +492,7 @@ static void __sysrq_put_key_op(int key, struct sysrq_key_op *op_p)
 		sysrq_key_table[i] = op_p;
 }
 
-void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
+void __handle_sysrq(int key, bool check_mask)
 {
 	struct sysrq_key_op *op_p;
 	int orig_log_level;
@@ -520,7 +519,7 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
 	if (!check_mask || sysrq_on_mask(op_p->enable_mask)) {
 		printk("%s\n", op_p->action_msg);
 		console_loglevel = orig_log_level;
-		op_p->handler(key, tty);
+		op_p->handler(key);
 	} else {
 		printk("This sysrq operation is disabled.\n");
 	}
@@ -545,10 +544,10 @@ void __handle_sysrq(int key, struct tty_struct *tty, int check_mask)
 	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
 }
 
-void handle_sysrq(int key, struct tty_struct *tty)
+void handle_sysrq(int key)
 {
 	if (sysrq_on())
-		__handle_sysrq(key, tty, 1);
+		__handle_sysrq(key, true);
 }
 EXPORT_SYMBOL(handle_sysrq);
 
@@ -597,7 +596,7 @@ static bool sysrq_filter(struct input_handle *handle, unsigned int type,
 
 	default:
 		if (sysrq_down && value && value != 2)
-			__handle_sysrq(sysrq_xlate[code], NULL, 1);
+			__handle_sysrq(sysrq_xlate[code], true);
 		break;
 	}
 
@@ -765,7 +764,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
 
 		if (get_user(c, buf))
 			return -EFAULT;
-		__handle_sysrq(c, NULL, 0);
+		__handle_sysrq(c, false);
 	}
 
 	return count;
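
Every handler above changes the same way: the never-used tty argument disappears and __handle_sysrq()'s check_mask becomes a bool. For out-of-tree code, a minimal sketch of a handler written against the new signature (the key choice and messages are illustrative, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/sysrq.h>

	static void sysrq_handle_demo(int key)	/* no struct tty_struct * anymore */
	{
		pr_info("demo sysrq handler fired for key '%c'\n", key);
	}

	static struct sysrq_key_op demo_sysrq_op = {
		.handler	= sysrq_handle_demo,
		.help_msg	= "demo(x)",
		.action_msg	= "Demo handler",
	};

	static int __init demo_init(void)
	{
		return register_sysrq_key('x', &demo_sysrq_op);
	}

	static void __exit demo_exit(void)
	{
		unregister_sysrq_key('x', &demo_sysrq_op);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
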
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 0350c42375a2..613c852ee0fe 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -136,6 +136,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 EXPORT_SYMBOL(tty_mutex);
 
+/* Spinlock to protect the tty->tty_files list */
+DEFINE_SPINLOCK(tty_files_lock);
+
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
 ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -185,6 +188,41 @@ void free_tty_struct(struct tty_struct *tty)
 	kfree(tty);
 }
 
+static inline struct tty_struct *file_tty(struct file *file)
+{
+	return ((struct tty_file_private *)file->private_data)->tty;
+}
+
+/* Associate a new file with the tty structure */
+void tty_add_file(struct tty_struct *tty, struct file *file)
+{
+	struct tty_file_private *priv;
+
+	/* XXX: must implement proper error handling in callers */
+	priv = kmalloc(sizeof(*priv), GFP_KERNEL|__GFP_NOFAIL);
+
+	priv->tty = tty;
+	priv->file = file;
+	file->private_data = priv;
+
+	spin_lock(&tty_files_lock);
+	list_add(&priv->list, &tty->tty_files);
+	spin_unlock(&tty_files_lock);
+}
+
+/* Delete file from its tty */
+void tty_del_file(struct file *file)
+{
+	struct tty_file_private *priv = file->private_data;
+
+	spin_lock(&tty_files_lock);
+	list_del(&priv->list);
+	spin_unlock(&tty_files_lock);
+	file->private_data = NULL;
+	kfree(priv);
+}
+
+
 #define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
 
 /**
@@ -235,11 +273,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
 	struct list_head *p;
 	int count = 0;
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	list_for_each(p, &tty->tty_files) {
 		count++;
 	}
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_SLAVE &&
 	    tty->link && tty->link->count)
@@ -317,7 +355,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
 		if (*stp == '\0')
 			stp = NULL;
 
-		if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+		if (tty_line >= 0 && tty_line < p->num && p->ops &&
 		    p->ops->poll_init && !p->ops->poll_init(p, tty_line, stp)) {
 			res = tty_driver_kref_get(p);
 			*line = tty_line;
@@ -497,6 +535,7 @@ void __tty_hangup(struct tty_struct *tty)
 	struct file *cons_filp = NULL;
 	struct file *filp, *f = NULL;
 	struct task_struct *p;
+	struct tty_file_private *priv;
 	int    closecount = 0, n;
 	unsigned long flags;
 	int refs = 0;
@@ -506,7 +545,7 @@ void __tty_hangup(struct tty_struct *tty)
 
 
 	spin_lock(&redirect_lock);
-	if (redirect && redirect->private_data == tty) {
+	if (redirect && file_tty(redirect) == tty) {
 		f = redirect;
 		redirect = NULL;
 	}
@@ -519,9 +558,10 @@ void __tty_hangup(struct tty_struct *tty)
 	   workqueue with the lock held */
 	check_tty_count(tty, "tty_hangup");
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	/* This breaks for file handles being sent over AF_UNIX sockets ? */
-	list_for_each_entry(filp, &tty->tty_files, f_u.fu_list) {
+	list_for_each_entry(priv, &tty->tty_files, list) {
+		filp = priv->file;
 		if (filp->f_op->write == redirected_tty_write)
 			cons_filp = filp;
 		if (filp->f_op->write != tty_write)
@@ -530,7 +570,7 @@ void __tty_hangup(struct tty_struct *tty)
 		__tty_fasync(-1, filp, 0);	/* can't block */
 		filp->f_op = &hung_up_tty_fops;
 	}
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 
 	tty_ldisc_hangup(tty);
 
@@ -889,12 +929,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
 			loff_t *ppos)
 {
 	int i;
-	struct tty_struct *tty;
-	struct inode *inode;
+	struct inode *inode = file->f_path.dentry->d_inode;
+	struct tty_struct *tty = file_tty(file);
 	struct tty_ldisc *ld;
 
-	tty = file->private_data;
-	inode = file->f_path.dentry->d_inode;
 	if (tty_paranoia_check(tty, inode, "tty_read"))
 		return -EIO;
 	if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
@@ -1065,12 +1103,11 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 static ssize_t tty_write(struct file *file, const char __user *buf,
 						size_t count, loff_t *ppos)
 {
-	struct tty_struct *tty;
 	struct inode *inode = file->f_path.dentry->d_inode;
+	struct tty_struct *tty = file_tty(file);
+	struct tty_ldisc *ld;
 	ssize_t ret;
-	struct tty_ldisc *ld;
 
-	tty = file->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_write"))
 		return -EIO;
 	if (!tty || !tty->ops->write ||
@@ -1424,9 +1461,9 @@ static void release_one_tty(struct work_struct *work)
 	tty_driver_kref_put(driver);
 	module_put(driver->owner);
 
-	file_list_lock();
+	spin_lock(&tty_files_lock);
 	list_del_init(&tty->tty_files);
-	file_list_unlock();
+	spin_unlock(&tty_files_lock);
 
 	put_pid(tty->pgrp);
 	put_pid(tty->session);
@@ -1507,13 +1544,13 @@ static void release_tty(struct tty_struct *tty, int idx)
 
 int tty_release(struct inode *inode, struct file *filp)
 {
-	struct tty_struct *tty, *o_tty;
+	struct tty_struct *tty = file_tty(filp);
+	struct tty_struct *o_tty;
 	int	pty_master, tty_closing, o_tty_closing, do_sleep;
 	int	devpts;
 	int	idx;
 	char	buf[64];
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_release_dev"))
 		return 0;
 
@@ -1671,8 +1708,7 @@ int tty_release(struct inode *inode, struct file *filp)
 	 *  - do_tty_hangup no longer sees this file descriptor as
 	 *    something that needs to be handled for hangups.
 	 */
-	file_kill(filp);
-	filp->private_data = NULL;
+	tty_del_file(filp);
 
 	/*
 	 * Perform some housekeeping before deciding whether to return.
@@ -1839,8 +1875,8 @@ got_driver:
 		return PTR_ERR(tty);
 	}
 
-	filp->private_data = tty;
-	file_move(filp, &tty->tty_files);
+	tty_add_file(tty, filp);
+
 	check_tty_count(tty, "tty_open");
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_MASTER)
@@ -1916,11 +1952,10 @@ got_driver:
 
 static unsigned int tty_poll(struct file *filp, poll_table *wait)
 {
-	struct tty_struct *tty;
+	struct tty_struct *tty = file_tty(filp);
 	struct tty_ldisc *ld;
 	int ret = 0;
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
 		return 0;
 
@@ -1933,11 +1968,10 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
-	struct tty_struct *tty;
+	struct tty_struct *tty = file_tty(filp);
 	unsigned long flags;
 	int retval = 0;
 
-	tty = filp->private_data;
 	if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
 		goto out;
 
@@ -2491,13 +2525,13 @@ EXPORT_SYMBOL(tty_pair_get_pty);
  */
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-	struct tty_struct *tty, *real_tty;
+	struct tty_struct *tty = file_tty(file);
+	struct tty_struct *real_tty;
 	void __user *p = (void __user *)arg;
 	int retval;
 	struct tty_ldisc *ld;
 	struct inode *inode = file->f_dentry->d_inode;
 
-	tty = file->private_data;
 	if (tty_paranoia_check(tty, inode, "tty_ioctl"))
 		return -EINVAL;
 
@@ -2619,7 +2653,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
 				unsigned long arg)
 {
 	struct inode *inode = file->f_dentry->d_inode;
-	struct tty_struct *tty = file->private_data;
+	struct tty_struct *tty = file_tty(file);
 	struct tty_ldisc *ld;
 	int retval = -ENOIOCTLCMD;
 
@@ -2711,7 +2745,7 @@ void __do_SAK(struct tty_struct *tty)
 			if (!filp)
 				continue;
 			if (filp->f_op->read == tty_read &&
-			    filp->private_data == tty) {
+			    file_tty(filp) == tty) {
 				printk(KERN_NOTICE "SAK: killed process %d"
 					" (%s): fd#%d opened to the tty\n",
 					task_pid_nr(p), p->comm, i);
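
The recurring pattern in this file: file->private_data no longer points at the tty_struct itself but at a per-open tty_file_private, which also links the file into tty->tty_files under the new tty_files_lock. Condensed, the indirection this series introduces (struct layout as added to include/linux/tty.h) looks like:

	struct tty_file_private {
		struct tty_struct *tty;
		struct file *file;
		struct list_head list;	/* on tty->tty_files, under tty_files_lock */
	};

	static inline struct tty_struct *file_tty(struct file *file)
	{
		return ((struct tty_file_private *)file->private_data)->tty;
	}

	/* before: struct tty_struct *tty = file->private_data;
	 * after:  struct tty_struct *tty = file_tty(file);
	 */
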
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index c734f9b1263a..281aada7b4a1 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -194,10 +194,11 @@ static DECLARE_WORK(console_work, console_callback);
 int fg_console;
 int last_console;
 int want_console = -1;
-int saved_fg_console;
-int saved_last_console;
-int saved_want_console;
-int saved_vc_mode;
+static int saved_fg_console;
+static int saved_last_console;
+static int saved_want_console;
+static int saved_vc_mode;
+static int saved_console_blanked;
 
 /*
  * For each existing display, we have a pointer to console currently visible
@@ -905,22 +906,16 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 			 * bottom of buffer
 			 */
 			old_origin += (old_rows - new_rows) * old_row_size;
-			end = vc->vc_scr_end;
 		} else {
 			/*
 			 * Cursor is in no man's land, copy 1/2 screenful
 			 * from the top and bottom of cursor position
 			 */
 			old_origin += (vc->vc_y - new_rows/2) * old_row_size;
-			end = old_origin + (old_row_size * new_rows);
 		}
-	} else
-		/*
-		 * Cursor near the top, copy contents from the top of buffer
-		 */
-		end = (old_rows > new_rows) ? old_origin +
-			(old_row_size * new_rows) :
-			vc->vc_scr_end;
+	}
+
+	end = old_origin + old_row_size * min(old_rows, new_rows);
 
 	update_attr(vc);
 
@@ -3074,8 +3069,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
 
 		old_was_color = vc->vc_can_do_color;
 		vc->vc_sw->con_deinit(vc);
-		if (!vc->vc_origin)
-			vc->vc_origin = (unsigned long)vc->vc_screenbuf;
+		vc->vc_origin = (unsigned long)vc->vc_screenbuf;
 		visual_init(vc, i, 0);
 		set_origin(vc);
 		update_attr(vc);
@@ -3449,6 +3443,7 @@ int con_debug_enter(struct vc_data *vc)
 	saved_last_console = last_console;
 	saved_want_console = want_console;
 	saved_vc_mode = vc->vc_mode;
+	saved_console_blanked = console_blanked;
 	vc->vc_mode = KD_TEXT;
 	console_blanked = 0;
 	if (vc->vc_sw->con_debug_enter)
@@ -3492,6 +3487,7 @@ int con_debug_leave(void)
 	fg_console = saved_fg_console;
 	last_console = saved_last_console;
 	want_console = saved_want_console;
+	console_blanked = saved_console_blanked;
 	vc_cons[fg_console].d->vc_mode = saved_vc_mode;
 
 	vc = vc_cons[fg_console].d;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 0ed763cd2e77..b663d573aad9 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -94,6 +94,7 @@
 
 #ifdef CONFIG_OF
 /* For open firmware. */
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #endif
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 670239ab7511..e7d5d6b5dcf6 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2071,16 +2071,6 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
 		amd64_handle_ce(mci, info);
 	else if (ecc_type == 1)
 		amd64_handle_ue(mci, info);
-
-	/*
-	 * If main error is CE then overflow must be CE.  If main error is UE
-	 * then overflow is unknown.  We'll call the overflow a CE - if
-	 * panic_on_ue is set then we're already panic'ed and won't arrive
-	 * here. Else, then apparently someone doesn't think that UE's are
-	 * catastrophic.
-	 */
-	if (info->nbsh & K8_NBSH_OVERFLOW)
-		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
 }
 
 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index bae9351e9473..9014df6f605d 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -365,11 +365,10 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
 
 	pr_emerg("MC%d_STATUS: ", m->bank);
 
-	pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+	pr_cont("%sorrected error, other errors lost: %s, "
 		"CPU context corrupt: %s",
 		((m->status & MCI_STATUS_UC) ? "Unc" : "C"),
-		((m->status & MCI_STATUS_EN) ? "yes" : "no"),
-		((m->status & MCI_STATUS_MISCV) ? "" : "in"),
+		((m->status & MCI_STATUS_OVER) ? "yes" : "no"),
 		((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
 	/* do the two bits[14:13] together */
@@ -426,11 +425,15 @@ static struct notifier_block amd_mce_dec_nb = {
 static int __init mce_amd_init(void)
 {
 	/*
-	 * We can decode MCEs for Opteron and later CPUs:
+	 * We can decode MCEs for K8, F10h and F11h CPUs:
 	 */
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
-	    (boot_cpu_data.x86 >= 0xf))
-		atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return 0;
+
+	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
+		return 0;
+
+	atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
 
 	return 0;
 }
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index ca7ca56661e0..b42a0bde8494 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -81,6 +81,10 @@ static int close_transaction(struct fw_transaction *transaction,
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t == transaction) {
+			if (!del_timer(&t->split_timeout_timer)) {
+				spin_unlock_irqrestore(&card->lock, flags);
+				goto timed_out;
+			}
 			list_del_init(&t->link);
 			card->tlabel_mask &= ~(1ULL << t->tlabel);
 			break;
@@ -89,11 +93,11 @@ static int close_transaction(struct fw_transaction *transaction,
 	spin_unlock_irqrestore(&card->lock, flags);
 
 	if (&t->link != &card->transaction_list) {
-		del_timer_sync(&t->split_timeout_timer);
 		t->callback(card, rcode, NULL, 0, t->callback_data);
 		return 0;
 	}
 
+ timed_out:
 	return -ENOENT;
 }
 
@@ -921,6 +925,10 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_lock_irqsave(&card->lock, flags);
 	list_for_each_entry(t, &card->transaction_list, link) {
 		if (t->node_id == source && t->tlabel == tlabel) {
+			if (!del_timer(&t->split_timeout_timer)) {
+				spin_unlock_irqrestore(&card->lock, flags);
+				goto timed_out;
+			}
 			list_del_init(&t->link);
 			card->tlabel_mask &= ~(1ULL << t->tlabel);
 			break;
@@ -929,6 +937,7 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 	spin_unlock_irqrestore(&card->lock, flags);
 
 	if (&t->link == &card->transaction_list) {
+ timed_out:
 		fw_notify("Unsolicited response (source %x, tlabel %x)\n",
 			  source, tlabel);
 		return;
@@ -963,8 +972,6 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
 		break;
 	}
 
-	del_timer_sync(&t->split_timeout_timer);
-
 	/*
 	 * The response handler may be executed while the request handler
 	 * is still pending.  Cancel the request handler.
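
Both hunks hang off del_timer()'s return value to arbitrate the race with the split-timeout handler: del_timer() returns 0 when the timer has already expired or is running, in which case the timeout path owns the transaction and the completion path must back off. A generic sketch of that ownership rule (demo types, not the fw_card ones):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/timer.h>

	struct demo_txn {
		struct list_head link;
		struct timer_list split_timer;
	};

	static DEFINE_SPINLOCK(demo_lock);

	/* Returns true if we won the race and may complete t ourselves. */
	static bool demo_try_claim(struct demo_txn *t)
	{
		unsigned long flags;
		bool claimed = false;

		spin_lock_irqsave(&demo_lock, flags);
		if (del_timer(&t->split_timer)) {	/* timer was still pending */
			list_del_init(&t->link);	/* t is ours now */
			claimed = true;
		}
		spin_unlock_irqrestore(&demo_lock, flags);

		return claimed;		/* false: the timeout handler owns t */
	}
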
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 51e8a35ebd7e..18fdd9703b48 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -578,7 +578,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 	if (!peer) {
 		fw_notify("No peer for ARP packet from %016llx\n",
 			  (unsigned long long)peer_guid);
-		goto failed_proto;
+		goto no_peer;
 	}
 
 	/*
@@ -655,7 +655,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 
 	return 0;
 
- failed_proto:
+ no_peer:
 	net->stats.rx_errors++;
 	net->stats.rx_dropped++;
 
@@ -663,7 +663,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 	if (netif_queue_stopped(net))
 		netif_wake_queue(net);
 
-	return 0;
+	return -ENOENT;
 }
 
 static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
@@ -700,7 +700,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 			fw_error("out of memory\n");
 		net->stats.rx_dropped++;
 
-		return -1;
+		return -ENOMEM;
 	}
 	skb_reserve(skb, (net->hard_header_len + 15) & ~15);
 	memcpy(skb_put(skb, len), buf, len);
@@ -725,8 +725,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 	spin_lock_irqsave(&dev->lock, flags);
 
 	peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
-	if (!peer)
-		goto bad_proto;
+	if (!peer) {
+		retval = -ENOENT;
+		goto fail;
+	}
 
 	pd = fwnet_pd_find(peer, datagram_label);
 	if (pd == NULL) {
@@ -740,7 +742,7 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 				dg_size, buf, fg_off, len);
 		if (pd == NULL) {
 			retval = -ENOMEM;
-			goto bad_proto;
+			goto fail;
 		}
 		peer->pdg_size++;
 	} else {
@@ -754,9 +756,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 			pd = fwnet_pd_new(net, peer, datagram_label,
 					  dg_size, buf, fg_off, len);
 			if (pd == NULL) {
-				retval = -ENOMEM;
 				peer->pdg_size--;
-				goto bad_proto;
+				retval = -ENOMEM;
+				goto fail;
 			}
 		} else {
 			if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
@@ -767,7 +769,8 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 				 */
 				fwnet_pd_delete(pd);
 				peer->pdg_size--;
-				goto bad_proto;
+				retval = -ENOMEM;
+				goto fail;
 			}
 		}
 	} /* new datagram or add to existing one */
@@ -793,14 +796,13 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	return 0;
-
- bad_proto:
+ fail:
 	spin_unlock_irqrestore(&dev->lock, flags);
 
 	if (netif_queue_stopped(net))
 		netif_wake_queue(net);
 
-	return 0;
+	return retval;
 }
 
 static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 7f03540cabe8..be29b0bb2471 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -694,7 +694,15 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
 	log_ar_at_event('R', p.speed, p.header, evt);
 
 	/*
-	 * The OHCI bus reset handler synthesizes a phy packet with
+	 * Several controllers, notably from NEC and VIA, forget to
+	 * write ack_complete status at PHY packet reception.
+	 */
+	if (evt == OHCI1394_evt_no_status &&
+	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
+		p.ack = ACK_COMPLETE;
+
+	/*
+	 * The OHCI bus reset handler synthesizes a PHY packet with
 	 * the new generation number when a bus reset happens (see
 	 * section 8.4.2.3).  This helps us determine when a request
 	 * was received and make sure we send the response in the same
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 9f76171717e5..bfae4b309791 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -450,7 +450,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 
 	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
-		kref_put(&orb->kref, free_orb);
+		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
 		fw_error("status write for unknown orb\n");
 	}
@@ -472,20 +472,28 @@ static void complete_transaction(struct fw_card *card, int rcode,
 	 * So this callback only sets the rcode if it hasn't already
 	 * been set and only does the cleanup if the transaction
 	 * failed and we didn't already get a status write.
+	 *
+	 * Here we treat RCODE_CANCELLED like RCODE_COMPLETE because some
+	 * OXUF936QSE firmwares occasionally respond after Split_Timeout and
+	 * complete the ORB just fine.  Note, we also get RCODE_CANCELLED
+	 * from sbp2_cancel_orbs() if fw_cancel_transaction() == 0.
 	 */
 	spin_lock_irqsave(&card->lock, flags);
 
 	if (orb->rcode == -1)
 		orb->rcode = rcode;
-	if (orb->rcode != RCODE_COMPLETE) {
+
+	if (orb->rcode != RCODE_COMPLETE && orb->rcode != RCODE_CANCELLED) {
 		list_del(&orb->link);
 		spin_unlock_irqrestore(&card->lock, flags);
+
 		orb->callback(orb, NULL);
+		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	} else {
 		spin_unlock_irqrestore(&card->lock, flags);
 	}
 
-	kref_put(&orb->kref, free_orb);
+	kref_put(&orb->kref, free_orb); /* transaction callback reference */
 }
 
@@ -501,9 +509,8 @@ static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
 	list_add_tail(&orb->link, &lu->orb_list);
 	spin_unlock_irqrestore(&device->card->lock, flags);
 
-	/* Take a ref for the orb list and for the transaction callback. */
-	kref_get(&orb->kref);
-	kref_get(&orb->kref);
+	kref_get(&orb->kref); /* transaction callback reference */
+	kref_get(&orb->kref); /* orb callback reference */
 
 	fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
 			node_id, generation, device->max_speed, offset,
@@ -525,11 +532,11 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
 
 	list_for_each_entry_safe(orb, next, &list, link) {
 		retval = 0;
-		if (fw_cancel_transaction(device->card, &orb->t) == 0)
-			continue;
+		fw_cancel_transaction(device->card, &orb->t);
 
 		orb->rcode = RCODE_CANCELLED;
 		orb->callback(orb, NULL);
+		kref_put(&orb->kref, free_orb); /* orb callback reference */
 	}
 
 	return retval;
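
The added comments make the ORB reference counting auditable: sbp2_send_orb() takes one reference per asynchronous completion path, and each path drops exactly the reference it owns. A stripped-down sketch of that discipline (the demo_* names are placeholders):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_orb {
		struct kref kref;
		/* ... payload ... */
	};

	static void demo_free(struct kref *kref)
	{
		kfree(container_of(kref, struct demo_orb, kref));
	}

	static void demo_submit(struct demo_orb *orb)
	{
		kref_get(&orb->kref);	/* transaction callback reference */
		kref_get(&orb->kref);	/* orb callback reference */
		/* ... kick off the transaction and the status write ... */
	}

	static void demo_transaction_done(struct demo_orb *orb)
	{
		kref_put(&orb->kref, demo_free);	/* transaction callback reference */
	}

	static void demo_orb_done(struct demo_orb *orb)
	{
		kref_put(&orb->kref, demo_free);	/* orb callback reference */
	}
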
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7e31d4348340..d2ab01e90a96 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,9 @@
 #include "drm_crtc_helper.h"
 #include "drm_fb_helper.h"
 
+static bool drm_kms_helper_poll = true;
+module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+
 static void drm_mode_validate_flag(struct drm_connector *connector,
 				   int flags)
 {
@@ -99,8 +102,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		connector->status = connector_status_disconnected;
 		if (connector->funcs->force)
 			connector->funcs->force(connector);
-	} else
+	} else {
 		connector->status = connector->funcs->detect(connector);
+		drm_helper_hpd_irq_event(dev);
+	}
 
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -110,11 +115,10 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	}
 
 	count = (*connector_funcs->get_modes)(connector);
-	if (!count) {
+	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
-		if (!count)
-			return 0;
-	}
+	if (count == 0)
+		goto prune;
 
 	drm_mode_connector_list_update(connector);
 
@@ -840,6 +844,9 @@ static void output_poll_execute(struct work_struct *work)
 	enum drm_connector_status old_status, status;
 	bool repoll = false, changed = false;
 
+	if (!drm_kms_helper_poll)
+		return;
+
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 
@@ -890,6 +897,9 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	bool poll = false;
 	struct drm_connector *connector;
 
+	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+		return;
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
@@ -919,8 +929,10 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
+
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
-	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+	if (drm_kms_helper_poll)
+		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
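
The new parameter makes connector polling switchable at runtime, without reloading the driver. Assuming the helpers are built into the usual drm_kms_helper module, writing 0 to /sys/module/drm_kms_helper/parameters/poll (root-writable thanks to the 0600 mode) should stop the poll worker, since output_poll_execute() now bails out early when polling is disabled.
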
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 90288ec7c284..84da748555bc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -55,6 +55,9 @@
 static int drm_version(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+
 /** Ioctl table */
 static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
@@ -421,6 +424,7 @@ long drm_ioctl(struct file *filp,
 	int retcode = -EINVAL;
 	char stack_kdata[128];
 	char *kdata = NULL;
+	unsigned int usize, asize;
 
 	dev = file_priv->minor->dev;
 	atomic_inc(&dev->ioctl_count);
@@ -436,11 +440,18 @@ long drm_ioctl(struct file *filp,
 	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
 		goto err_i1;
 	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
-	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+		u32 drv_size;
 		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+		drv_size = _IOC_SIZE(ioctl->cmd_drv);
+		usize = asize = _IOC_SIZE(cmd);
+		if (drv_size > asize)
+			asize = drv_size;
+	}
 	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
 		ioctl = &drm_ioctls[nr];
 		cmd = ioctl->cmd;
+		usize = asize = _IOC_SIZE(cmd);
 	} else
 		goto err_i1;
 
@@ -460,10 +471,10 @@ long drm_ioctl(struct file *filp,
 		retcode = -EACCES;
 	} else {
 		if (cmd & (IOC_IN | IOC_OUT)) {
-			if (_IOC_SIZE(cmd) <= sizeof(stack_kdata)) {
+			if (asize <= sizeof(stack_kdata)) {
 				kdata = stack_kdata;
 			} else {
-				kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+				kdata = kmalloc(asize, GFP_KERNEL);
 				if (!kdata) {
 					retcode = -ENOMEM;
 					goto err_i1;
@@ -473,11 +484,13 @@ long drm_ioctl(struct file *filp,
 
 		if (cmd & IOC_IN) {
 			if (copy_from_user(kdata, (void __user *)arg,
-					   _IOC_SIZE(cmd)) != 0) {
+					   usize) != 0) {
 				retcode = -EFAULT;
 				goto err_i1;
 			}
-		}
+		} else
+			memset(kdata, 0, usize);
+
 		if (ioctl->flags & DRM_UNLOCKED)
 			retcode = func(dev, kdata, file_priv);
 		else {
@@ -488,7 +501,7 @@ long drm_ioctl(struct file *filp,
 
 		if (cmd & IOC_OUT) {
 			if (copy_to_user((void __user *)arg, kdata,
-					 _IOC_SIZE(cmd)) != 0)
+					 usize) != 0)
 				retcode = -EFAULT;
 		}
 	}
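Taken together, the drm_ioctl() hunks close a buffer-sizing hole: the kernel buffer is now sized to asize, the larger of the size userspace encoded in cmd and the size the driver compiled into cmd_drv, while copies to and from userspace still use the caller's usize. A sketch of the resulting arithmetic, with hypothetical sizes for illustration:

    /* usize: what userspace declared; asize: what the driver may touch */
    unsigned int usize = _IOC_SIZE(cmd);		/* e.g. 24 from an old header */
    unsigned int asize = _IOC_SIZE(ioctl->cmd_drv);	/* e.g. 32 in today's driver */
    if (usize > asize)
    	asize = usize;

    kdata = kmalloc(asize, GFP_KERNEL);		/* driver-visible size */
    if (copy_from_user(kdata, (void __user *)arg, usize))
    	return -EFAULT;				/* only the user-declared bytes */

So a driver touching the full 32 bytes stays inside its own allocation instead of running off the end of a 24-byte buffer, and the memset() in the no-IOC_IN branch guarantees a zeroed buffer on the output-only path.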
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index de82e201d682..6a5e403f9aa1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -94,10 +94,11 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
 	int i;
 	enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
 	struct drm_fb_helper_cmdline_mode *cmdline_mode;
-	struct drm_connector *connector = fb_helper_conn->connector;
+	struct drm_connector *connector;
 
 	if (!fb_helper_conn)
 		return false;
+	connector = fb_helper_conn->connector;
 
 	cmdline_mode = &fb_helper_conn->cmdline_mode;
 	if (!mode_option)
@@ -369,7 +370,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
 }
 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
 
-static void drm_fb_helper_sysrq(int dummy1, struct tty_struct *dummy3)
+static void drm_fb_helper_sysrq(int dummy1)
 {
 	schedule_work(&drm_fb_helper_restore_work);
 }
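Two separate fixes here. The second hunk just tracks the new sysrq handler prototype, which dropped the tty argument. The first is an ordering bug: the old code dereferenced fb_helper_conn in the initializer before the NULL check could run, so the check was dead code and a NULL pointer would oops first. A self-contained model of the corrected ordering (the struct names are hypothetical):

    #include <stdbool.h>
    #include <stddef.h>

    struct connector { int id; };
    struct helper_conn { struct connector *connector; };

    static bool parse(struct helper_conn *conn)
    {
    	struct connector *c;

    	if (!conn)		/* must run before any conn-> access */
    		return false;
    	c = conn->connector;	/* safe: conn is known non-NULL here */
    	return c != NULL;
    }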
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a652a65546f..b744dad5c237 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -41,6 +41,7 @@
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
+EXPORT_SYMBOL(drm_global_mutex);
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
 			   struct drm_device * dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index e2f70a516c34..9bf93bc9a32c 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -92,7 +92,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	}
 
 	/* Contention */
+	mutex_unlock(&drm_global_mutex);
 	schedule();
+	mutex_lock(&drm_global_mutex);
 	if (signal_pending(current)) {
 		ret = -EINTR;
 		break;
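With the BKL gone, drm_lock() sleeps under drm_global_mutex, and a mutex, unlike the old BKL, is not released when the task schedules away. Dropping and retaking it around schedule() keeps every other ioctl from stalling behind a contended hardware lock. The shape of the wait loop, sketched with a hypothetical lock-attempt helper standing in for the real compare-and-swap sequence:

    /* sketch; try_take() is hypothetical */
    for (;;) {
    	if (try_take(&master->lock))
    		break;

    	/* Contention */
    	mutex_unlock(&drm_global_mutex);	/* let other ioctls proceed */
    	schedule();				/* the BKL used to self-drop here */
    	mutex_lock(&drm_global_mutex);
    	if (signal_pending(current)) {
    		ret = -EINTR;
    		break;
    	}
    }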
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index da99edc50888..a6bfc302ed90 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -285,21 +285,21 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 
 EXPORT_SYMBOL(drm_mm_put_block);
 
-static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
-			      unsigned alignment)
+static int check_free_hole(unsigned long start, unsigned long end,
+			   unsigned long size, unsigned alignment)
 {
 	unsigned wasted = 0;
 
-	if (entry->size < size)
+	if (end - start < size)
 		return 0;
 
 	if (alignment) {
-		register unsigned tmp = entry->start % alignment;
+		unsigned tmp = start % alignment;
 		if (tmp)
 			wasted = alignment - tmp;
 	}
 
-	if (entry->size >= size + wasted) {
+	if (end >= start + size + wasted) {
 		return 1;
 	}
 
@@ -320,7 +320,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	best_size = ~0UL;
 
 	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		if (!check_free_mm_node(entry, size, alignment))
+		if (!check_free_hole(entry->start, entry->start + entry->size,
+				     size, alignment))
 			continue;
 
 		if (!best_match)
@@ -353,10 +354,12 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 	best_size = ~0UL;
 
 	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		if (entry->start > end || (entry->start+entry->size) < start)
-			continue;
+		unsigned long adj_start = entry->start < start ?
+			start : entry->start;
+		unsigned long adj_end = entry->start + entry->size > end ?
+			end : entry->start + entry->size;
 
-		if (!check_free_mm_node(entry, size, alignment))
+		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
 		if (!best_match)
@@ -449,7 +452,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 	node->free_stack.prev = prev_free;
 	node->free_stack.next = next_free;
 
-	if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+	if (check_free_hole(node->start, node->start + node->size,
+			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = node->start;
 		mm->scan_hit_size = node->size;
 
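The refactor replaces the node-based check_free_mm_node() with a range-based check_free_hole() so that drm_mm_search_free_in_range() can clamp each free block to the caller's [start, end) window (the adj_start/adj_end pair above) before testing it, instead of rejecting any block that merely straddles the window. The test itself: pad start up to the next alignment boundary, then see whether size still fits below end. A standalone model matching the hunk, which also drops the archaic register qualifier in passing:

    static int check_free_hole(unsigned long start, unsigned long end,
    			   unsigned long size, unsigned alignment)
    {
    	unsigned long wasted = 0;

    	if (end - start < size)
    		return 0;			/* hole too small outright */

    	if (alignment) {
    		unsigned long tmp = start % alignment;
    		if (tmp)
    			wasted = alignment - tmp;	/* padding to boundary */
    	}

    	return end >= start + size + wasted;	/* fits after padding? */
    }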
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index f1f473ea97d3..949326d2a8e5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -251,7 +251,10 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
 		drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK;
 		/* Fill in HSync values */
 		drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2;
-		drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC;
+		drm_mode->hsync_start = drm_mode->hsync_end - CVT_RB_H_SYNC;
+		/* Fill in VSync values */
+		drm_mode->vsync_start = drm_mode->vdisplay + CVT_RB_VFPORCH;
+		drm_mode->vsync_end = drm_mode->vsync_start + vsync;
 	}
 	/* 15/13. Find pixel clock frequency (kHz for xf86) */
 	drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
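The replaced line was a typo with real consequences: hsync_start = hsync_end = CVT_RB_H_SYNC chained the assignment, setting both sync timings to the sync-width constant itself; the fix subtracts the width from hsync_end. The reduced-blanking branch also gained the vsync values it had never filled in. Worked numbers for a 1280-wide mode, assuming the usual CVT reduced-blanking constants (a 160-pixel horizontal blank and a 32-pixel sync; the values are assumptions, not taken from this hunk):

    enum { CVT_RB_H_BLANK = 160, CVT_RB_H_SYNC = 32 };	/* assumed values */
    int hdisplay    = 1280;
    int htotal      = hdisplay + CVT_RB_H_BLANK;		/* 1440 */
    int hsync_end   = hdisplay + CVT_RB_H_BLANK / 2;	/* 1360 */
    int hsync_start = hsync_end - CVT_RB_H_SYNC;		/* 1328, not 32 */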
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 3778360eceea..fda67468e603 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -138,7 +138,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			break;
 	}
 
-	if (!agpmem)
+	if (&agpmem->head == &dev->agp->memory)
 		goto vm_fault_error;
 
 	/*
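The old test was the classic list_for_each_entry() mistake: the cursor is never NULL after the loop, because on exhaustion it holds the container_of() translation of the list head itself. The reliable exhaustion check compares the cursor's member back to the head, exactly as the fix does. The idiom in isolation (the match predicate is hypothetical):

    struct drm_agp_mem *agpmem;

    list_for_each_entry(agpmem, &dev->agp->memory, head) {
    	if (covers_fault(agpmem, baddr))	/* hypothetical predicate */
    		break;
    }

    /* wrong: if (!agpmem) is never true here */
    if (&agpmem->head == &dev->agp->memory)	/* loop ran to completion */
    	goto vm_fault_error;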
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 0e6c131313d9..61b4caf220fa 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -1255,21 +1255,21 @@ long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i810_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 5168862c9227..671aa18415ac 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -1524,20 +1524,20 @@ long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 }
 
 struct drm_ioctl_desc i830_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
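Both driver tables switch from DRM_IOCTL_DEF() to DRM_IOCTL_DEF_DRV(), whose point is to record the complete ioctl command word, including its _IOC_SIZE() payload size, in the new cmd_drv field that drm_ioctl() consults when sizing kdata. The macro itself is defined in the shared DRM headers rather than in this diff; a plausible shape, inferred from how the call sites use it:

    /* assumed definition; the real one lives in drmP.h, not shown here */
    #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
    	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl,		\
    				       .func = _func,			\
    				       .flags = _flags,			\
    				       .cmd_drv = DRM_IOCTL_##ioctl}

Pasting the token twice lets one argument supply both the table index (DRM_I810_INIT) and the full command encoding (DRM_IOCTL_I810_INIT).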
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c0d909..5c8e53458edb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
+	  i915_gem_evict.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
@@ -18,6 +19,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_hdmi.o \
 	  intel_sdvo.o \
 	  intel_modes.o \
+	  intel_panel.o \
 	  intel_i2c.o \
 	  intel_fb.o \
 	  intel_tv.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 0d6ff640e1c6..8c2ad014c47f 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -30,20 +30,17 @@
 #include "intel_drv.h"
 
 struct intel_dvo_device {
-	char *name;
+	const char *name;
 	int type;
 	/* DVOA/B/C output register */
 	u32 dvo_reg;
 	/* GPIO register used for i2c bus to control this device */
 	u32 gpio;
 	int slave_addr;
-	struct i2c_adapter *i2c_bus;
 
 	const struct intel_dvo_dev_ops *dev_ops;
 	void *dev_priv;
-
-	struct drm_display_mode *panel_fixed_mode;
-	bool panel_wants_dither;
+	struct i2c_adapter *i2c_bus;
 };
 
 struct intel_dvo_dev_ops {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9214119c0154..5e43d7076789 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -31,6 +31,7 @@
 #include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
+#include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -121,6 +122,54 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	unsigned long flags;
+	struct intel_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+		const char *pipe = crtc->pipe ? "B" : "A";
+		const char *plane = crtc->plane ? "B" : "A";
+		struct intel_unpin_work *work;
+
+		spin_lock_irqsave(&dev->event_lock, flags);
+		work = crtc->unpin_work;
+		if (work == NULL) {
+			seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+				   pipe, plane);
+		} else {
+			if (!work->pending) {
+				seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+					   pipe, plane);
+			} else {
+				seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+					   pipe, plane);
+			}
+			if (work->enable_stall_check)
+				seq_printf(m, "Stall check enabled, ");
+			else
+				seq_printf(m, "Stall check waiting for page flip ioctl, ");
+			seq_printf(m, "%d prepares\n", work->pending);
+
+			if (work->old_fb_obj) {
+				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
+				if (obj_priv)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
+			}
+			if (work->pending_flip_obj) {
+				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
+				if (obj_priv)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset);
+			}
+		}
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -467,6 +516,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		}
 	}
 
+	if (error->overlay)
+		intel_overlay_print_error_state(m, error->overlay);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
@@ -774,6 +826,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f19ffe87af3c..9d67b4853030 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -499,6 +499,13 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 		}
 	}
 
+
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+		BEGIN_LP_RING(2);
+		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+		OUT_RING(MI_NOOP);
+		ADVANCE_LP_RING();
+	}
 	i915_emit_breadcrumb(dev);
 
 	return 0;
@@ -613,8 +620,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 		ret = copy_from_user(cliprects, batch->cliprects,
 				     batch->num_cliprects *
 				     sizeof(struct drm_clip_rect));
-		if (ret != 0)
+		if (ret != 0) {
+			ret = -EFAULT;
 			goto fail_free;
+		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -655,8 +664,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
-	if (ret != 0)
+	if (ret != 0) {
+		ret = -EFAULT;
 		goto fail_batch_free;
+	}
 
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
@@ -669,8 +680,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
 				     sizeof(struct drm_clip_rect));
-		if (ret != 0)
+		if (ret != 0) {
+			ret = -EFAULT;
 			goto fail_clip_free;
+		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
@@ -878,7 +891,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
 	u32 temp_lo, temp_hi = 0;
 	u64 mchbar_addr;
-	int ret = 0;
+	int ret;
 
 	if (IS_I965G(dev))
 		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
@@ -888,22 +901,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
 #ifdef CONFIG_PNP
 	if (mchbar_addr &&
-	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
-		ret = 0;
-		goto out;
-	}
+	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+		return 0;
 #endif
 
 	/* Get some space for it */
-	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+	dev_priv->mch_res.name = "i915 MCHBAR";
+	dev_priv->mch_res.flags = IORESOURCE_MEM;
+	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+				     &dev_priv->mch_res,
 				     MCHBAR_SIZE, MCHBAR_SIZE,
 				     PCIBIOS_MIN_MEM,
 				     0, pcibios_align_resource,
 				     dev_priv->bridge_dev);
 	if (ret) {
 		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
-		goto out;
+		return ret;
 	}
 
 	if (IS_I965G(dev))
@@ -912,8 +926,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 
 	pci_write_config_dword(dev_priv->bridge_dev, reg,
 			       lower_32_bits(dev_priv->mch_res.start));
-out:
-	return ret;
+	return 0;
 }
 
 /* Setup MCHBAR if possible, return true if we should disable it again */
@@ -2075,6 +2088,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto free_priv;
 	}
 
+	/* overlay on gen2 is broken and can't address above 1G */
+	if (IS_GEN2(dev))
+		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+
 	dev_priv->regs = ioremap(base, size);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
@@ -2360,46 +2377,46 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 struct drm_ioctl_desc i915_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
-	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
-	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
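A recurring fix in this file: copy_from_user() returns the number of bytes it failed to copy, not a negative errno, so the old goto fail_free paths leaked a positive byte count to userspace as the ioctl return value. The corrected pattern:

    /* copy_from_user() reports residual bytes; convert to an errno */
    ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
    if (ret != 0) {
    	ret = -EFAULT;		/* not the leftover byte count */
    	goto fail_batch_free;
    }

The MCHBAR hunks are a tidy-up in the same spirit: with the early return on the pnp-reserved path, the out: label and the ret = 0 bookkeeping disappear, and the resource now carries a name and IORESOURCE_MEM flags before pci_bus_alloc_resource() sees it.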
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5044f653e8ea..216deb579785 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,91 +61,86 @@ extern int intel_agp_enabled;
 	.driver_data = (unsigned long) info }
 
 static const struct intel_device_info intel_i830_info = {
-	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_845g_info = {
-	.is_i8xx = 1,
+	.gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i85x_info = {
-	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i865g_info = {
-	.is_i8xx = 1,
+	.gen = 2, .is_i8xx = 1,
 };
 
 static const struct intel_device_info intel_i915g_info = {
-	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i915gm_info = {
-	.is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i9xx = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945g_info = {
-	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 static const struct intel_device_info intel_i945gm_info = {
-	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+	.has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
-	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_hotplug = 1,
+	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
-	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1,
+	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_g45_info = {
-	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_pineview_info = {
-	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1,
-	.has_hotplug = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
-	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_pipe_cxsr = 1,
-	.has_hotplug = 1,
+	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
-	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
-	.has_hotplug = 1,
+	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1, .is_gen6 = 1,
+	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
-	.has_hotplug = 1, .is_gen6 = 1,
+	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 };
 
 static const struct pci_device_id pciidlist[] = {	/* aka */
@@ -180,7 +175,12 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
 	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
 	INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
+	INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
 	INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
+	INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
 	{0, 0, 0}
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 906663b9929e..af4a263cf257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -113,6 +113,9 @@ struct intel_opregion {
 	int enabled;
 };
 
+struct intel_overlay;
+struct intel_overlay_error_state;
+
 struct drm_i915_master_private {
 	drm_local_map_t *sarea;
 	struct _drm_i915_sarea *sarea_priv;
@@ -166,6 +169,7 @@ struct drm_i915_error_state {
 		u32 purgeable:1;
 	} *active_bo;
 	u32 active_bo_count;
+	struct intel_overlay_error_state *overlay;
 };
 
 struct drm_i915_display_funcs {
@@ -186,9 +190,8 @@ struct drm_i915_display_funcs {
 	/* clock gating init */
 };
 
-struct intel_overlay;
-
 struct intel_device_info {
+	u8 gen;
 	u8 is_mobile : 1;
 	u8 is_i8xx : 1;
 	u8 is_i85x : 1;
@@ -204,7 +207,6 @@ struct intel_device_info {
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
 	u8 is_ironlake : 1;
-	u8 is_gen6 : 1;
 	u8 has_fbc : 1;
 	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
@@ -242,6 +244,7 @@ typedef struct drm_i915_private {
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
+	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
 	void *seqno_page;
@@ -251,6 +254,7 @@ typedef struct drm_i915_private {
 	drm_local_map_t hws_map;
 	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
+	struct drm_gem_object *renderctx;
 
 	struct resource mch_res;
 
@@ -285,6 +289,9 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 	int num_pipe;
+	u32 flush_rings;
+#define FLUSH_RENDER_RING	0x1
+#define FLUSH_BSD_RING		0x2
 
 	/* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
@@ -568,8 +575,6 @@ typedef struct drm_i915_private {
 	 */
 	struct delayed_work retire_work;
 
-	uint32_t next_gem_seqno;
-
 	/**
 	 * Waiting sequence number, if any
 	 */
@@ -610,6 +615,8 @@ typedef struct drm_i915_private {
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
+	/* Panel fitter placement and size for Ironlake+ */
+	u32 pch_pf_pos, pch_pf_size;
 
 	struct drm_crtc *plane_to_crtc_mapping[2];
 	struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -669,6 +676,8 @@ struct drm_i915_gem_object {
 	struct list_head list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
+	/** This object's place on eviction list */
+	struct list_head evict_list;
 
 	/**
 	 * This is set if the object is on the active or flushing lists
@@ -978,6 +987,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
 			  struct drm_file *file_priv,
@@ -991,7 +1001,9 @@ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
 int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_gem_object *obj, int id);
+				struct drm_gem_object *obj,
+				int id,
+				int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
@@ -1003,6 +1015,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
@@ -1066,6 +1083,10 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
+/* overlay */
+extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
@@ -1092,26 +1113,26 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define I915_VERBOSE 0
 
 #define BEGIN_LP_RING(n) do {						\
-	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
-	intel_ring_begin(dev, &dev_priv->render_ring, (n));		\
+	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
 } while (0)
 
 
 #define OUT_RING(x) do {						\
-	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
-	intel_ring_emit(dev, &dev_priv->render_ring, x);		\
+	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
 } while (0)
 
 #define ADVANCE_LP_RING() do {						\
-	drm_i915_private_t *dev_priv = dev->dev_private;		\
+	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
 	if (I915_VERBOSE)						\
 		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
-			  dev_priv->render_ring.tail);			\
-	intel_ring_advance(dev, &dev_priv->render_ring);		\
+			  dev_priv__->render_ring.tail);		\
+	intel_ring_advance(dev, &dev_priv__->render_ring);		\
 } while(0)
 
 /**
@@ -1141,7 +1162,6 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_845G(dev)	((dev)->pci_device == 0x2562)
 #define IS_I85X(dev)	(INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)	((dev)->pci_device == 0x2572)
-#define IS_GEN2(dev)	(INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)	(INTEL_INFO(dev)->is_i915g)
 #define IS_I915GM(dev)	((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)	((dev)->pci_device == 0x2772)
@@ -1160,27 +1180,13 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
 #define IS_I9XX(dev)	(INTEL_INFO(dev)->is_i9xx)
-#define IS_GEN6(dev)	(INTEL_INFO(dev)->is_gen6)
 #define IS_MOBILE(dev)	(INTEL_INFO(dev)->is_mobile)
 
-#define IS_GEN3(dev)	(IS_I915G(dev) || \
-			 IS_I915GM(dev) || \
-			 IS_I945G(dev) || \
-			 IS_I945GM(dev) || \
-			 IS_G33(dev) || \
-			 IS_PINEVIEW(dev))
-#define IS_GEN4(dev)	((dev)->pci_device == 0x2972 || \
-			 (dev)->pci_device == 0x2982 || \
-			 (dev)->pci_device == 0x2992 || \
-			 (dev)->pci_device == 0x29A2 || \
-			 (dev)->pci_device == 0x2A02 || \
-			 (dev)->pci_device == 0x2A12 || \
-			 (dev)->pci_device == 0x2E02 || \
-			 (dev)->pci_device == 0x2E12 || \
-			 (dev)->pci_device == 0x2E22 || \
-			 (dev)->pci_device == 0x2E32 || \
-			 (dev)->pci_device == 0x2A42 || \
-			 (dev)->pci_device == 0x2E42)
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 
 #define HAS_BSD(dev) (IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
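Two i915_drv.h changes deserve a note. First, the ring macros rename their hidden local to dev_priv__ so a macro argument that itself mentions dev_priv now binds to the caller's variable instead of the macro's freshly declared copy:

    #define OUT_RING(x) do {						\
    	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
    	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
    } while (0)

    /* with the old inner name, 'x' below would have expanded against the
     * macro's own dev_priv; 'counter' is a hypothetical field */
    OUT_RING(dev_priv->counter);

Second, the generation checks collapse onto the new INTEL_INFO(dev)->gen byte: IS_GEN4() no longer enumerates a dozen PCI device IDs, so supporting a new device becomes a matter of setting .gen correctly in its intel_device_info entry.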
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0758c7802e6b..16fca1d1799a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,7 +34,9 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
37 38
39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
38static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
39static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 41static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
40static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 42static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -48,8 +50,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
48static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 50static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
49 unsigned alignment); 51 unsigned alignment);
50static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 52static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
51static int i915_gem_evict_something(struct drm_device *dev, int min_size);
52static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 53static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
54 struct drm_i915_gem_pwrite *args, 54 struct drm_i915_gem_pwrite *args,
55 struct drm_file *file_priv); 55 struct drm_file *file_priv);
@@ -58,6 +58,14 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj);
58static LIST_HEAD(shrink_list); 58static LIST_HEAD(shrink_list);
59static DEFINE_SPINLOCK(shrink_list_lock); 59static DEFINE_SPINLOCK(shrink_list_lock);
60 60
61static inline bool
62i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
63{
64 return obj_priv->gtt_space &&
65 !obj_priv->active &&
66 obj_priv->pin_count == 0;
67}
68
61int i915_gem_do_init(struct drm_device *dev, unsigned long start, 69int i915_gem_do_init(struct drm_device *dev, unsigned long start,
62 unsigned long end) 70 unsigned long end)
63{ 71{
@@ -128,12 +136,15 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
128 return -ENOMEM; 136 return -ENOMEM;
129 137
130 ret = drm_gem_handle_create(file_priv, obj, &handle); 138 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 drm_gem_object_unreference_unlocked(obj); 139 if (ret) {
132 if (ret) 140 drm_gem_object_unreference_unlocked(obj);
133 return ret; 141 return ret;
142 }
134 143
135 args->handle = handle; 144 /* Sink the floating reference from kref_init(handlecount) */
145 drm_gem_object_handle_unreference_unlocked(obj);
136 146
147 args->handle = handle;
137 return 0; 148 return 0;
138} 149}
139 150
@@ -313,7 +324,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
313 if (ret == -ENOMEM) { 324 if (ret == -ENOMEM) {
314 struct drm_device *dev = obj->dev; 325 struct drm_device *dev = obj->dev;
315 326
316 ret = i915_gem_evict_something(dev, obj->size); 327 ret = i915_gem_evict_something(dev, obj->size,
328 i915_gem_get_gtt_alignment(obj));
317 if (ret) 329 if (ret)
318 return ret; 330 return ret;
319 331
@@ -1036,6 +1048,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1036 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1048 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1037 } 1049 }
1038 1050
1051
1052 /* Maintain LRU order of "inactive" objects */
1053 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1054 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1055
1039 drm_gem_object_unreference(obj); 1056 drm_gem_object_unreference(obj);
1040 mutex_unlock(&dev->struct_mutex); 1057 mutex_unlock(&dev->struct_mutex);
1041 return ret; 1058 return ret;
@@ -1137,7 +1154,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1137{ 1154{
1138 struct drm_gem_object *obj = vma->vm_private_data; 1155 struct drm_gem_object *obj = vma->vm_private_data;
1139 struct drm_device *dev = obj->dev; 1156 struct drm_device *dev = obj->dev;
1140 struct drm_i915_private *dev_priv = dev->dev_private; 1157 drm_i915_private_t *dev_priv = dev->dev_private;
1141 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1158 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1142 pgoff_t page_offset; 1159 pgoff_t page_offset;
1143 unsigned long pfn; 1160 unsigned long pfn;
@@ -1155,8 +1172,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1155 if (ret) 1172 if (ret)
1156 goto unlock; 1173 goto unlock;
1157 1174
1158 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1159
1160 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1175 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1161 if (ret) 1176 if (ret)
1162 goto unlock; 1177 goto unlock;
@@ -1169,6 +1184,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1169 goto unlock; 1184 goto unlock;
1170 } 1185 }
1171 1186
1187 if (i915_gem_object_is_inactive(obj_priv))
1188 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1189
1172 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 1190 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
1173 page_offset; 1191 page_offset;
1174 1192
@@ -1363,7 +1381,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1363 struct drm_file *file_priv) 1381 struct drm_file *file_priv)
1364{ 1382{
1365 struct drm_i915_gem_mmap_gtt *args = data; 1383 struct drm_i915_gem_mmap_gtt *args = data;
1366 struct drm_i915_private *dev_priv = dev->dev_private;
1367 struct drm_gem_object *obj; 1384 struct drm_gem_object *obj;
1368 struct drm_i915_gem_object *obj_priv; 1385 struct drm_i915_gem_object *obj_priv;
1369 int ret; 1386 int ret;
@@ -1409,7 +1426,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1409 mutex_unlock(&dev->struct_mutex); 1426 mutex_unlock(&dev->struct_mutex);
1410 return ret; 1427 return ret;
1411 } 1428 }
1412 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1413 } 1429 }
1414 1430
1415 drm_gem_object_unreference(obj); 1431 drm_gem_object_unreference(obj);
@@ -1493,9 +1509,16 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
1493 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1494 struct inode *inode; 1510 struct inode *inode;
1495 1511
1512 /* Our goal here is to return as much of the memory as
1513 * is possible back to the system as we are called from OOM.
1514 * To do this we must instruct the shmfs to drop all of its
1515 * backing pages, *now*. Here we mirror the actions taken
1516 * when by shmem_delete_inode() to release the backing store.
1517 */
1496 inode = obj->filp->f_path.dentry->d_inode; 1518 inode = obj->filp->f_path.dentry->d_inode;
1497 if (inode->i_op->truncate) 1519 truncate_inode_pages(inode->i_mapping, 0);
1498 inode->i_op->truncate (inode); 1520 if (inode->i_op->truncate_range)
1521 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1499 1522
1500 obj_priv->madv = __I915_MADV_PURGED; 1523 obj_priv->madv = __I915_MADV_PURGED;
1501} 1524}
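
Editor's note: purge decisions elsewhere in this file hinge on a madvise predicate; a sketch of it as assumed here (userspace opts the buffer in via I915_MADV_DONTNEED):

    static inline bool
    i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
    {
            /* userspace has declared the backing storage disposable */
            return obj_priv->madv == I915_MADV_DONTNEED;
    }

Once truncated as above, the object is marked __I915_MADV_PURGED so later attempts to use its backing store can be refused rather than silently faulting in zeroed pages.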
@@ -1887,19 +1910,6 @@ i915_gem_flush(struct drm_device *dev,
1887 flush_domains); 1910 flush_domains);
1888} 1911}
1889 1912
1890static void
1891i915_gem_flush_ring(struct drm_device *dev,
1892 uint32_t invalidate_domains,
1893 uint32_t flush_domains,
1894 struct intel_ring_buffer *ring)
1895{
1896 if (flush_domains & I915_GEM_DOMAIN_CPU)
1897 drm_agp_chipset_flush(dev);
1898 ring->flush(dev, ring,
1899 invalidate_domains,
1900 flush_domains);
1901}
1902
1903/** 1913/**
1904 * Ensures that all rendering to the object has completed and the object is 1914 * Ensures that all rendering to the object has completed and the object is
1905 * safe to unbind from the GTT or access from the CPU. 1915 * safe to unbind from the GTT or access from the CPU.
@@ -1973,8 +1983,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
1973 * cause memory corruption through use-after-free. 1983 * cause memory corruption through use-after-free.
1974 */ 1984 */
1975 1985
1976 BUG_ON(obj_priv->active);
1977
1978 /* release the fence reg _after_ flushing */ 1986 /* release the fence reg _after_ flushing */
1979 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 1987 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1980 i915_gem_clear_fence_reg(obj); 1988 i915_gem_clear_fence_reg(obj);
@@ -2010,34 +2018,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2010 return ret; 2018 return ret;
2011} 2019}
2012 2020
2013static struct drm_gem_object * 2021int
2014i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
2015{
2016 drm_i915_private_t *dev_priv = dev->dev_private;
2017 struct drm_i915_gem_object *obj_priv;
2018 struct drm_gem_object *best = NULL;
2019 struct drm_gem_object *first = NULL;
2020
2021 /* Try to find the smallest clean object */
2022 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
2023 struct drm_gem_object *obj = &obj_priv->base;
2024 if (obj->size >= min_size) {
2025 if ((!obj_priv->dirty ||
2026 i915_gem_object_is_purgeable(obj_priv)) &&
2027 (!best || obj->size < best->size)) {
2028 best = obj;
2029 if (best->size == min_size)
2030 return best;
2031 }
2032 if (!first)
2033 first = obj;
2034 }
2035 }
2036
2037 return best ? best : first;
2038}
2039
2040static int
2041i915_gpu_idle(struct drm_device *dev) 2022i915_gpu_idle(struct drm_device *dev)
2042{ 2023{
2043 drm_i915_private_t *dev_priv = dev->dev_private; 2024 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2078,155 +2059,6 @@ i915_gpu_idle(struct drm_device *dev)
2078 return ret; 2059 return ret;
2079} 2060}
2080 2061
2081static int
2082i915_gem_evict_everything(struct drm_device *dev)
2083{
2084 drm_i915_private_t *dev_priv = dev->dev_private;
2085 int ret;
2086 bool lists_empty;
2087
2088 spin_lock(&dev_priv->mm.active_list_lock);
2089 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2090 list_empty(&dev_priv->mm.flushing_list) &&
2091 list_empty(&dev_priv->render_ring.active_list) &&
2092 (!HAS_BSD(dev)
2093 || list_empty(&dev_priv->bsd_ring.active_list)));
2094 spin_unlock(&dev_priv->mm.active_list_lock);
2095
2096 if (lists_empty)
2097 return -ENOSPC;
2098
2099 /* Flush everything (on to the inactive lists) and evict */
2100 ret = i915_gpu_idle(dev);
2101 if (ret)
2102 return ret;
2103
2104 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2105
2106 ret = i915_gem_evict_from_inactive_list(dev);
2107 if (ret)
2108 return ret;
2109
2110 spin_lock(&dev_priv->mm.active_list_lock);
2111 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
2112 list_empty(&dev_priv->mm.flushing_list) &&
2113 list_empty(&dev_priv->render_ring.active_list) &&
2114 (!HAS_BSD(dev)
2115 || list_empty(&dev_priv->bsd_ring.active_list)));
2116 spin_unlock(&dev_priv->mm.active_list_lock);
2117 BUG_ON(!lists_empty);
2118
2119 return 0;
2120}
2121
2122static int
2123i915_gem_evict_something(struct drm_device *dev, int min_size)
2124{
2125 drm_i915_private_t *dev_priv = dev->dev_private;
2126 struct drm_gem_object *obj;
2127 int ret;
2128
2129 struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
2130 struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
2131 for (;;) {
2132 i915_gem_retire_requests(dev);
2133
2134 /* If there's an inactive buffer available now, grab it
2135 * and be done.
2136 */
2137 obj = i915_gem_find_inactive_object(dev, min_size);
2138 if (obj) {
2139 struct drm_i915_gem_object *obj_priv;
2140
2141#if WATCH_LRU
2142 DRM_INFO("%s: evicting %p\n", __func__, obj);
2143#endif
2144 obj_priv = to_intel_bo(obj);
2145 BUG_ON(obj_priv->pin_count != 0);
2146 BUG_ON(obj_priv->active);
2147
2148 /* Wait on the rendering and unbind the buffer. */
2149 return i915_gem_object_unbind(obj);
2150 }
2151
2152 /* If we didn't get anything, but the ring is still processing
2153 * things, wait for the next to finish and hopefully leave us
2154 * a buffer to evict.
2155 */
2156 if (!list_empty(&render_ring->request_list)) {
2157 struct drm_i915_gem_request *request;
2158
2159 request = list_first_entry(&render_ring->request_list,
2160 struct drm_i915_gem_request,
2161 list);
2162
2163 ret = i915_wait_request(dev,
2164 request->seqno, request->ring);
2165 if (ret)
2166 return ret;
2167
2168 continue;
2169 }
2170
2171 if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
2172 struct drm_i915_gem_request *request;
2173
2174 request = list_first_entry(&bsd_ring->request_list,
2175 struct drm_i915_gem_request,
2176 list);
2177
2178 ret = i915_wait_request(dev,
2179 request->seqno, request->ring);
2180 if (ret)
2181 return ret;
2182
2183 continue;
2184 }
2185
2186 /* If we didn't have anything on the request list but there
2187 * are buffers awaiting a flush, emit one and try again.
2188 * When we wait on it, those buffers waiting for that flush
2189 * will get moved to inactive.
2190 */
2191 if (!list_empty(&dev_priv->mm.flushing_list)) {
2192 struct drm_i915_gem_object *obj_priv;
2193
2194 /* Find an object that we can immediately reuse */
2195 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
2196 obj = &obj_priv->base;
2197 if (obj->size >= min_size)
2198 break;
2199
2200 obj = NULL;
2201 }
2202
2203 if (obj != NULL) {
2204 uint32_t seqno;
2205
2206 i915_gem_flush_ring(dev,
2207 obj->write_domain,
2208 obj->write_domain,
2209 obj_priv->ring);
2210 seqno = i915_add_request(dev, NULL,
2211 obj->write_domain,
2212 obj_priv->ring);
2213 if (seqno == 0)
2214 return -ENOMEM;
2215 continue;
2216 }
2217 }
2218
2219 /* If we didn't do any of the above, there's no single buffer
2220 * large enough to swap out for the new one, so just evict
2221 * everything and start again. (This should be rare.)
2222 */
2223 if (!list_empty (&dev_priv->mm.inactive_list))
2224 return i915_gem_evict_from_inactive_list(dev);
2225 else
2226 return i915_gem_evict_everything(dev);
2227 }
2228}
2229
2230int 2062int
2231i915_gem_object_get_pages(struct drm_gem_object *obj, 2063i915_gem_object_get_pages(struct drm_gem_object *obj,
2232 gfp_t gfpmask) 2064 gfp_t gfpmask)
@@ -2666,7 +2498,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2666#if WATCH_LRU 2498#if WATCH_LRU
2667 DRM_INFO("%s: GTT full, evicting something\n", __func__); 2499 DRM_INFO("%s: GTT full, evicting something\n", __func__);
2668#endif 2500#endif
2669 ret = i915_gem_evict_something(dev, obj->size); 2501 ret = i915_gem_evict_something(dev, obj->size, alignment);
2670 if (ret) 2502 if (ret)
2671 return ret; 2503 return ret;
2672 2504
@@ -2684,7 +2516,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2684 2516
2685 if (ret == -ENOMEM) { 2517 if (ret == -ENOMEM) {
2686 /* first try to clear up some space from the GTT */ 2518 /* first try to clear up some space from the GTT */
2687 ret = i915_gem_evict_something(dev, obj->size); 2519 ret = i915_gem_evict_something(dev, obj->size,
2520 alignment);
2688 if (ret) { 2521 if (ret) {
2689 /* now try to shrink everyone else */ 2522 /* now try to shrink everyone else */
2690 if (gfpmask) { 2523 if (gfpmask) {
@@ -2714,7 +2547,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2714 drm_mm_put_block(obj_priv->gtt_space); 2547 drm_mm_put_block(obj_priv->gtt_space);
2715 obj_priv->gtt_space = NULL; 2548 obj_priv->gtt_space = NULL;
2716 2549
2717 ret = i915_gem_evict_something(dev, obj->size); 2550 ret = i915_gem_evict_something(dev, obj->size, alignment);
2718 if (ret) 2551 if (ret)
2719 return ret; 2552 return ret;
2720 2553
@@ -2723,6 +2556,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2723 atomic_inc(&dev->gtt_count); 2556 atomic_inc(&dev->gtt_count);
2724 atomic_add(obj->size, &dev->gtt_memory); 2557 atomic_add(obj->size, &dev->gtt_memory);
2725 2558
 2559 /* keep track of the bound object by adding it to the inactive list */
2560 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
2561
2726 /* Assert that the object is not currently in any GPU domain. As it 2562 /* Assert that the object is not currently in any GPU domain. As it
2727 * wasn't in the GTT, there shouldn't be any way it could have been in 2563 * wasn't in the GTT, there shouldn't be any way it could have been in
2728 * a GPU cache 2564 * a GPU cache
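
Editor's note: all three call sites in this function now pass the requested alignment down to the evictor. The contract assumed here (the function itself moves to the new i915_gem_evict.c later in this patch) is roughly:

    /* Free a GTT hole of at least min_size bytes at the given alignment.
     * Returns 0 once such a hole exists, -ENOSPC if no set of evictable
     * objects can produce one, or another negative errno on failure. */
    int i915_gem_evict_something(struct drm_device *dev,
                                 int min_size, unsigned alignment);

Passing the alignment lets the evictor verify up front, via drm_mm_search_free(), whether retiring requests alone has already freed a suitable hole.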
@@ -3117,6 +2953,7 @@ static void
3117i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 2953i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3118{ 2954{
3119 struct drm_device *dev = obj->dev; 2955 struct drm_device *dev = obj->dev;
2956 drm_i915_private_t *dev_priv = dev->dev_private;
3120 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2957 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3121 uint32_t invalidate_domains = 0; 2958 uint32_t invalidate_domains = 0;
3122 uint32_t flush_domains = 0; 2959 uint32_t flush_domains = 0;
@@ -3179,6 +3016,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3179 obj->pending_write_domain = obj->write_domain; 3016 obj->pending_write_domain = obj->write_domain;
3180 obj->read_domains = obj->pending_read_domains; 3017 obj->read_domains = obj->pending_read_domains;
3181 3018
3019 if (flush_domains & I915_GEM_GPU_DOMAINS) {
3020 if (obj_priv->ring == &dev_priv->render_ring)
3021 dev_priv->flush_rings |= FLUSH_RENDER_RING;
3022 else if (obj_priv->ring == &dev_priv->bsd_ring)
3023 dev_priv->flush_rings |= FLUSH_BSD_RING;
3024 }
3025
3182 dev->invalidate_domains |= invalidate_domains; 3026 dev->invalidate_domains |= invalidate_domains;
3183 dev->flush_domains |= flush_domains; 3027 dev->flush_domains |= flush_domains;
3184#if WATCH_BUF 3028#if WATCH_BUF
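
Editor's note: flush_rings accumulates which rings actually own dirty objects, so the later execbuffer flush can skip idle rings. The flag values are assumed to be simple bits defined next to the field in i915_drv.h, along these lines:

    /* rings needing a flush before the next batch (assumed values) */
    #define FLUSH_RENDER_RING 0x1
    #define FLUSH_BSD_RING    0x2

Without this bookkeeping, every flush also emitted a request on the BSD ring merely because the hardware has one, inflating the request lists.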
@@ -3718,7 +3562,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3718 ring = &dev_priv->render_ring; 3562 ring = &dev_priv->render_ring;
3719 } 3563 }
3720 3564
3721
3722 if (args->buffer_count < 1) { 3565 if (args->buffer_count < 1) {
3723 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 3566 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3724 return -EINVAL; 3567 return -EINVAL;
@@ -3746,6 +3589,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3746 if (ret != 0) { 3589 if (ret != 0) {
3747 DRM_ERROR("copy %d cliprects failed: %d\n", 3590 DRM_ERROR("copy %d cliprects failed: %d\n",
3748 args->num_cliprects, ret); 3591 args->num_cliprects, ret);
3592 ret = -EFAULT;
3749 goto pre_mutex_err; 3593 goto pre_mutex_err;
3750 } 3594 }
3751 } 3595 }
@@ -3892,6 +3736,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3892 */ 3736 */
3893 dev->invalidate_domains = 0; 3737 dev->invalidate_domains = 0;
3894 dev->flush_domains = 0; 3738 dev->flush_domains = 0;
3739 dev_priv->flush_rings = 0;
3895 3740
3896 for (i = 0; i < args->buffer_count; i++) { 3741 for (i = 0; i < args->buffer_count; i++) {
3897 struct drm_gem_object *obj = object_list[i]; 3742 struct drm_gem_object *obj = object_list[i];
@@ -3912,16 +3757,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3912 i915_gem_flush(dev, 3757 i915_gem_flush(dev,
3913 dev->invalidate_domains, 3758 dev->invalidate_domains,
3914 dev->flush_domains); 3759 dev->flush_domains);
3915 if (dev->flush_domains & I915_GEM_GPU_DOMAINS) { 3760 if (dev_priv->flush_rings & FLUSH_RENDER_RING)
3916 (void)i915_add_request(dev, file_priv, 3761 (void)i915_add_request(dev, file_priv,
3917 dev->flush_domains, 3762 dev->flush_domains,
3918 &dev_priv->render_ring); 3763 &dev_priv->render_ring);
3919 3764 if (dev_priv->flush_rings & FLUSH_BSD_RING)
3920 if (HAS_BSD(dev)) 3765 (void)i915_add_request(dev, file_priv,
3921 (void)i915_add_request(dev, file_priv, 3766 dev->flush_domains,
3922 dev->flush_domains, 3767 &dev_priv->bsd_ring);
3923 &dev_priv->bsd_ring);
3924 }
3925 } 3768 }
3926 3769
3927 for (i = 0; i < args->buffer_count; i++) { 3770 for (i = 0; i < args->buffer_count; i++) {
@@ -4192,6 +4035,10 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4192 if (alignment == 0) 4035 if (alignment == 0)
4193 alignment = i915_gem_get_gtt_alignment(obj); 4036 alignment = i915_gem_get_gtt_alignment(obj);
4194 if (obj_priv->gtt_offset & (alignment - 1)) { 4037 if (obj_priv->gtt_offset & (alignment - 1)) {
4038 WARN(obj_priv->pin_count,
4039 "bo is already pinned with incorrect alignment:"
4040 " offset=%x, req.alignment=%x\n",
4041 obj_priv->gtt_offset, alignment);
4195 ret = i915_gem_object_unbind(obj); 4042 ret = i915_gem_object_unbind(obj);
4196 if (ret) 4043 if (ret)
4197 return ret; 4044 return ret;
@@ -4213,8 +4060,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4213 atomic_inc(&dev->pin_count); 4060 atomic_inc(&dev->pin_count);
4214 atomic_add(obj->size, &dev->pin_memory); 4061 atomic_add(obj->size, &dev->pin_memory);
4215 if (!obj_priv->active && 4062 if (!obj_priv->active &&
4216 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 && 4063 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
4217 !list_empty(&obj_priv->list))
4218 list_del_init(&obj_priv->list); 4064 list_del_init(&obj_priv->list);
4219 } 4065 }
4220 i915_verify_inactive(dev, __FILE__, __LINE__); 4066 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4359,22 +4205,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4359 } 4205 }
4360 4206
4361 mutex_lock(&dev->struct_mutex); 4207 mutex_lock(&dev->struct_mutex);
4362 /* Update the active list for the hardware's current position.
4363 * Otherwise this only updates on a delayed timer or when irqs are
4364 * actually unmasked, and our working set ends up being larger than
4365 * required.
4366 */
4367 i915_gem_retire_requests(dev);
4368 4208
4369 obj_priv = to_intel_bo(obj); 4209 /* Count all active objects as busy, even if they are currently not used
4370 /* Don't count being on the flushing list against the object being 4210 * by the gpu. Users of this interface expect objects to eventually
4371 * done. Otherwise, a buffer left on the flushing list but not getting 4211 * become non-busy without any further actions, therefore emit any
4372 * flushed (because nobody's flushing that domain) won't ever return 4212 * necessary flushes here.
4373 * unbusy and get reused by libdrm's bo cache. The other expected
4374 * consumer of this interface, OpenGL's occlusion queries, also specs
4375 * that the objects get unbusy "eventually" without any interference.
4376 */ 4213 */
4377 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0; 4214 obj_priv = to_intel_bo(obj);
4215 args->busy = obj_priv->active;
4216 if (args->busy) {
4217 /* Unconditionally flush objects, even when the gpu still uses this
4218 * object. Userspace calling this function indicates that it wants to
 4219 * use this buffer sooner rather than later, so issuing the required
4220 * flush earlier is beneficial.
4221 */
4222 if (obj->write_domain) {
4223 i915_gem_flush(dev, 0, obj->write_domain);
4224 (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
4225 }
4226
4227 /* Update the active list for the hardware's current position.
4228 * Otherwise this only updates on a delayed timer or when irqs
4229 * are actually unmasked, and our working set ends up being
4230 * larger than required.
4231 */
4232 i915_gem_retire_requests_ring(dev, obj_priv->ring);
4233
4234 args->busy = obj_priv->active;
4235 }
4378 4236
4379 drm_gem_object_unreference(obj); 4237 drm_gem_object_unreference(obj);
4380 mutex_unlock(&dev->struct_mutex); 4238 mutex_unlock(&dev->struct_mutex);
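
Editor's note: from the userspace side, the revised semantics matter for buffer-cache reuse. A hedged usage sketch with the standard libdrm ioctl wrapper (fd and bo_handle assumed to exist; reuse_buffer is a hypothetical cache helper):

    struct drm_i915_gem_busy busy = { .handle = bo_handle };
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy)
            reuse_buffer(bo_handle);   /* object is idle, safe to recycle */

Because the ioctl now emits any pending flush itself, a caller polling this interface is guaranteed to see the object go idle without issuing further rendering.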
@@ -4514,30 +4372,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
4514 i915_gem_free_object_tail(obj); 4372 i915_gem_free_object_tail(obj);
4515} 4373}
4516 4374
4517/** Unbinds all inactive objects. */
4518static int
4519i915_gem_evict_from_inactive_list(struct drm_device *dev)
4520{
4521 drm_i915_private_t *dev_priv = dev->dev_private;
4522
4523 while (!list_empty(&dev_priv->mm.inactive_list)) {
4524 struct drm_gem_object *obj;
4525 int ret;
4526
4527 obj = &list_first_entry(&dev_priv->mm.inactive_list,
4528 struct drm_i915_gem_object,
4529 list)->base;
4530
4531 ret = i915_gem_object_unbind(obj);
4532 if (ret != 0) {
4533 DRM_ERROR("Error unbinding object: %d\n", ret);
4534 return ret;
4535 }
4536 }
4537
4538 return 0;
4539}
4540
4541int 4375int
4542i915_gem_idle(struct drm_device *dev) 4376i915_gem_idle(struct drm_device *dev)
4543{ 4377{
@@ -4562,7 +4396,7 @@ i915_gem_idle(struct drm_device *dev)
4562 4396
4563 /* Under UMS, be paranoid and evict. */ 4397 /* Under UMS, be paranoid and evict. */
4564 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 4398 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4565 ret = i915_gem_evict_from_inactive_list(dev); 4399 ret = i915_gem_evict_inactive(dev);
4566 if (ret) { 4400 if (ret) {
4567 mutex_unlock(&dev->struct_mutex); 4401 mutex_unlock(&dev->struct_mutex);
4568 return ret; 4402 return ret;
@@ -4680,6 +4514,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4680 goto cleanup_render_ring; 4514 goto cleanup_render_ring;
4681 } 4515 }
4682 4516
4517 dev_priv->next_seqno = 1;
4518
4683 return 0; 4519 return 0;
4684 4520
4685cleanup_render_ring: 4521cleanup_render_ring:
@@ -4841,7 +4677,7 @@ i915_gem_load(struct drm_device *dev)
4841 * e.g. for cursor + overlay regs 4677 * e.g. for cursor + overlay regs
4842 */ 4678 */
4843int i915_gem_init_phys_object(struct drm_device *dev, 4679int i915_gem_init_phys_object(struct drm_device *dev,
4844 int id, int size) 4680 int id, int size, int align)
4845{ 4681{
4846 drm_i915_private_t *dev_priv = dev->dev_private; 4682 drm_i915_private_t *dev_priv = dev->dev_private;
4847 struct drm_i915_gem_phys_object *phys_obj; 4683 struct drm_i915_gem_phys_object *phys_obj;
@@ -4856,7 +4692,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
4856 4692
4857 phys_obj->id = id; 4693 phys_obj->id = id;
4858 4694
4859 phys_obj->handle = drm_pci_alloc(dev, size, 0); 4695 phys_obj->handle = drm_pci_alloc(dev, size, align);
4860 if (!phys_obj->handle) { 4696 if (!phys_obj->handle) {
4861 ret = -ENOMEM; 4697 ret = -ENOMEM;
4862 goto kfree_obj; 4698 goto kfree_obj;
@@ -4938,7 +4774,9 @@ out:
4938 4774
4939int 4775int
4940i915_gem_attach_phys_object(struct drm_device *dev, 4776i915_gem_attach_phys_object(struct drm_device *dev,
4941 struct drm_gem_object *obj, int id) 4777 struct drm_gem_object *obj,
4778 int id,
4779 int align)
4942{ 4780{
4943 drm_i915_private_t *dev_priv = dev->dev_private; 4781 drm_i915_private_t *dev_priv = dev->dev_private;
4944 struct drm_i915_gem_object *obj_priv; 4782 struct drm_i915_gem_object *obj_priv;
@@ -4957,11 +4795,10 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4957 i915_gem_detach_phys_object(dev, obj); 4795 i915_gem_detach_phys_object(dev, obj);
4958 } 4796 }
4959 4797
4960
4961 /* create a new object */ 4798 /* create a new object */
4962 if (!dev_priv->mm.phys_objs[id - 1]) { 4799 if (!dev_priv->mm.phys_objs[id - 1]) {
4963 ret = i915_gem_init_phys_object(dev, id, 4800 ret = i915_gem_init_phys_object(dev, id,
4964 obj->size); 4801 obj->size, align);
4965 if (ret) { 4802 if (ret) {
4966 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); 4803 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
4967 goto out; 4804 goto out;
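
Editor's note: callers of i915_gem_attach_phys_object() are now expected to pass the alignment their hardware demands for the physical allocation. A hypothetical call (the 4096 value is illustrative only; cursor ids are consecutive in i915_drm.h):

    ret = i915_gem_attach_phys_object(dev, obj,
                                      I915_GEM_PHYS_CURSOR_0 + pipe,
                                      4096 /* hypothetical HW alignment */);

The alignment is forwarded through i915_gem_init_phys_object() into drm_pci_alloc(), replacing the previous hard-coded 0.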
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 000000000000..72cae3cccad8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
 25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drv.h"
32#include "i915_drm.h"
33
34static struct drm_i915_gem_object *
35i915_gem_next_active_object(struct drm_device *dev,
36 struct list_head **render_iter,
37 struct list_head **bsd_iter)
38{
39 drm_i915_private_t *dev_priv = dev->dev_private;
40 struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
41
42 if (*render_iter != &dev_priv->render_ring.active_list)
43 render_obj = list_entry(*render_iter,
44 struct drm_i915_gem_object,
45 list);
46
47 if (HAS_BSD(dev)) {
48 if (*bsd_iter != &dev_priv->bsd_ring.active_list)
49 bsd_obj = list_entry(*bsd_iter,
50 struct drm_i915_gem_object,
51 list);
52
53 if (render_obj == NULL) {
54 *bsd_iter = (*bsd_iter)->next;
55 return bsd_obj;
56 }
57
58 if (bsd_obj == NULL) {
59 *render_iter = (*render_iter)->next;
60 return render_obj;
61 }
62
63 /* XXX can we handle seqno wrapping? */
64 if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
65 *render_iter = (*render_iter)->next;
66 return render_obj;
67 } else {
68 *bsd_iter = (*bsd_iter)->next;
69 return bsd_obj;
70 }
71 } else {
72 *render_iter = (*render_iter)->next;
73 return render_obj;
74 }
75}
76
77static bool
78mark_free(struct drm_i915_gem_object *obj_priv,
79 struct list_head *unwind)
80{
81 list_add(&obj_priv->evict_list, unwind);
82 return drm_mm_scan_add_block(obj_priv->gtt_space);
83}
84
85#define i915_for_each_active_object(OBJ, R, B) \
86 *(R) = dev_priv->render_ring.active_list.next; \
87 *(B) = dev_priv->bsd_ring.active_list.next; \
88 while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
89
90int
91i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
92{
93 drm_i915_private_t *dev_priv = dev->dev_private;
94 struct list_head eviction_list, unwind_list;
95 struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
96 struct list_head *render_iter, *bsd_iter;
97 int ret = 0;
98
99 i915_gem_retire_requests(dev);
100
101 /* Re-check for free space after retiring requests */
102 if (drm_mm_search_free(&dev_priv->mm.gtt_space,
103 min_size, alignment, 0))
104 return 0;
105
106 /*
107 * The goal is to evict objects and amalgamate space in LRU order.
108 * The oldest idle objects reside on the inactive list, which is in
109 * retirement order. The next objects to retire are those on the (per
110 * ring) active list that do not have an outstanding flush. Once the
111 * hardware reports completion (the seqno is updated after the
112 * batchbuffer has been finished) the clean buffer objects would
113 * be retired to the inactive list. Any dirty objects would be added
114 * to the tail of the flushing list. So after processing the clean
115 * active objects we need to emit a MI_FLUSH to retire the flushing
116 * list, hence the retirement order of the flushing list is in
117 * advance of the dirty objects on the active lists.
118 *
119 * The retirement sequence is thus:
120 * 1. Inactive objects (already retired)
121 * 2. Clean active objects
122 * 3. Flushing list
123 * 4. Dirty active objects.
124 *
125 * On each list, the oldest objects lie at the HEAD with the freshest
126 * object on the TAIL.
127 */
128
129 INIT_LIST_HEAD(&unwind_list);
130 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
131
132 /* First see if there is a large enough contiguous idle region... */
133 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
134 if (mark_free(obj_priv, &unwind_list))
135 goto found;
136 }
137
138 /* Now merge in the soon-to-be-expired objects... */
139 i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
140 /* Does the object require an outstanding flush? */
141 if (obj_priv->base.write_domain || obj_priv->pin_count)
142 continue;
143
144 if (mark_free(obj_priv, &unwind_list))
145 goto found;
146 }
147
148 /* Finally add anything with a pending flush (in order of retirement) */
149 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
150 if (obj_priv->pin_count)
151 continue;
152
153 if (mark_free(obj_priv, &unwind_list))
154 goto found;
155 }
156 i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
 157 if (!obj_priv->base.write_domain || obj_priv->pin_count)
158 continue;
159
160 if (mark_free(obj_priv, &unwind_list))
161 goto found;
162 }
163
164 /* Nothing found, clean up and bail out! */
165 list_for_each_entry(obj_priv, &unwind_list, evict_list) {
166 ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
167 BUG_ON(ret);
168 }
169
170 /* We expect the caller to unpin, evict all and try again, or give up.
171 * So calling i915_gem_evict_everything() is unnecessary.
172 */
173 return -ENOSPC;
174
175found:
176 INIT_LIST_HEAD(&eviction_list);
177 list_for_each_entry_safe(obj_priv, tmp_obj_priv,
178 &unwind_list, evict_list) {
179 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
 180 /* drm_mm doesn't allow any other operations while
 181 * scanning, therefore store the to-be-evicted objects on a
 182 * temporary list. */
183 list_move(&obj_priv->evict_list, &eviction_list);
184 }
185 }
186
187 /* Unbinding will emit any required flushes */
188 list_for_each_entry_safe(obj_priv, tmp_obj_priv,
189 &eviction_list, evict_list) {
190#if WATCH_LRU
 191 DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
192#endif
193 ret = i915_gem_object_unbind(&obj_priv->base);
194 if (ret)
195 return ret;
196 }
197
 198 /* The just-created free hole should be on top of the free stack
 199 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
 200 * Furthermore, all the data accessed here has just been used, so it
 201 * should be really fast, too. */
202 BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
203 alignment, 0));
204
205 return 0;
206}
207
208int
209i915_gem_evict_everything(struct drm_device *dev)
210{
211 drm_i915_private_t *dev_priv = dev->dev_private;
212 int ret;
213 bool lists_empty;
214
215 spin_lock(&dev_priv->mm.active_list_lock);
216 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
217 list_empty(&dev_priv->mm.flushing_list) &&
218 list_empty(&dev_priv->render_ring.active_list) &&
219 (!HAS_BSD(dev)
220 || list_empty(&dev_priv->bsd_ring.active_list)));
221 spin_unlock(&dev_priv->mm.active_list_lock);
222
223 if (lists_empty)
224 return -ENOSPC;
225
226 /* Flush everything (on to the inactive lists) and evict */
227 ret = i915_gpu_idle(dev);
228 if (ret)
229 return ret;
230
231 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
232
233 ret = i915_gem_evict_inactive(dev);
234 if (ret)
235 return ret;
236
237 spin_lock(&dev_priv->mm.active_list_lock);
238 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
239 list_empty(&dev_priv->mm.flushing_list) &&
240 list_empty(&dev_priv->render_ring.active_list) &&
241 (!HAS_BSD(dev)
242 || list_empty(&dev_priv->bsd_ring.active_list)));
243 spin_unlock(&dev_priv->mm.active_list_lock);
244 BUG_ON(!lists_empty);
245
246 return 0;
247}
248
249/** Unbinds all inactive objects. */
250int
251i915_gem_evict_inactive(struct drm_device *dev)
252{
253 drm_i915_private_t *dev_priv = dev->dev_private;
254
255 while (!list_empty(&dev_priv->mm.inactive_list)) {
256 struct drm_gem_object *obj;
257 int ret;
258
259 obj = &list_first_entry(&dev_priv->mm.inactive_list,
260 struct drm_i915_gem_object,
261 list)->base;
262
263 ret = i915_gem_object_unbind(obj);
264 if (ret != 0) {
265 DRM_ERROR("Error unbinding object: %d\n", ret);
266 return ret;
267 }
268 }
269
270 return 0;
271}
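
Editor's note: the roster logic above is built on drm_mm's scan helpers; the protocol, as used by this file, is a three-step dance:

    /* 1. declare what kind of hole is wanted */
    drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

    /* 2. feed blocks in eviction order; a true return means the
     *    blocks fed so far would coalesce into a suitable hole */
    if (drm_mm_scan_add_block(obj_priv->gtt_space))
            goto found;

    /* 3. every added block must be removed again before any other
     *    drm_mm operation; a true return marks a block that is part
     *    of the hole and therefore must actually be evicted */
    if (drm_mm_scan_remove_block(obj_priv->gtt_space))
            list_move(&obj_priv->evict_list, &eviction_list);

Step 3 is mandatory even on the failure path, which is why the "Nothing found" branch walks the whole unwind list before returning -ENOSPC.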
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85785a8844ed..59457e83b011 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -425,9 +425,11 @@ static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 425i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 426 struct drm_gem_object *src)
427{ 427{
428 drm_i915_private_t *dev_priv = dev->dev_private;
428 struct drm_i915_error_object *dst; 429 struct drm_i915_error_object *dst;
429 struct drm_i915_gem_object *src_priv; 430 struct drm_i915_gem_object *src_priv;
430 int page, page_count; 431 int page, page_count;
432 u32 reloc_offset;
431 433
432 if (src == NULL) 434 if (src == NULL)
433 return NULL; 435 return NULL;
@@ -442,18 +444,27 @@ i915_error_object_create(struct drm_device *dev,
442 if (dst == NULL) 444 if (dst == NULL)
443 return NULL; 445 return NULL;
444 446
447 reloc_offset = src_priv->gtt_offset;
445 for (page = 0; page < page_count; page++) { 448 for (page = 0; page < page_count; page++) {
446 void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
447 unsigned long flags; 449 unsigned long flags;
450 void __iomem *s;
451 void *d;
448 452
453 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
449 if (d == NULL) 454 if (d == NULL)
450 goto unwind; 455 goto unwind;
456
451 local_irq_save(flags); 457 local_irq_save(flags);
452 s = kmap_atomic(src_priv->pages[page], KM_IRQ0); 458 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
453 memcpy(d, s, PAGE_SIZE); 459 reloc_offset,
454 kunmap_atomic(s, KM_IRQ0); 460 KM_IRQ0);
461 memcpy_fromio(d, s, PAGE_SIZE);
462 io_mapping_unmap_atomic(s, KM_IRQ0);
455 local_irq_restore(flags); 463 local_irq_restore(flags);
464
456 dst->pages[page] = d; 465 dst->pages[page] = d;
466
467 reloc_offset += PAGE_SIZE;
457 } 468 }
458 dst->page_count = page_count; 469 dst->page_count = page_count;
459 dst->gtt_offset = src_priv->gtt_offset; 470 dst->gtt_offset = src_priv->gtt_offset;
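
Editor's note: the switch from kmap_atomic() to the GTT io-mapping is deliberate. The error capture should read what the GPU saw, including data that only reached memory through the write-combined aperture, so the snapshot matches the commands the hardware actually executed. The per-page access pattern:

    /* map one page of the object through the GTT aperture */
    s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
                                 reloc_offset, KM_IRQ0);
    memcpy_fromio(d, s, PAGE_SIZE);  /* io-safe copy, not plain memcpy */
    io_mapping_unmap_atomic(s, KM_IRQ0);
    reloc_offset += PAGE_SIZE;       /* walk the object linearly */
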
@@ -489,6 +500,7 @@ i915_error_state_free(struct drm_device *dev,
489 i915_error_object_free(error->batchbuffer[1]); 500 i915_error_object_free(error->batchbuffer[1]);
490 i915_error_object_free(error->ringbuffer); 501 i915_error_object_free(error->ringbuffer);
491 kfree(error->active_bo); 502 kfree(error->active_bo);
503 kfree(error->overlay);
492 kfree(error); 504 kfree(error);
493} 505}
494 506
@@ -612,18 +624,57 @@ static void i915_capture_error_state(struct drm_device *dev)
612 624
613 if (batchbuffer[1] == NULL && 625 if (batchbuffer[1] == NULL &&
614 error->acthd >= obj_priv->gtt_offset && 626 error->acthd >= obj_priv->gtt_offset &&
615 error->acthd < obj_priv->gtt_offset + obj->size && 627 error->acthd < obj_priv->gtt_offset + obj->size)
616 batchbuffer[0] != obj)
617 batchbuffer[1] = obj; 628 batchbuffer[1] = obj;
618 629
619 count++; 630 count++;
620 } 631 }
632 /* Scan the other lists for completeness for those bizarre errors. */
633 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
634 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
635 struct drm_gem_object *obj = &obj_priv->base;
636
637 if (batchbuffer[0] == NULL &&
638 bbaddr >= obj_priv->gtt_offset &&
639 bbaddr < obj_priv->gtt_offset + obj->size)
640 batchbuffer[0] = obj;
641
642 if (batchbuffer[1] == NULL &&
643 error->acthd >= obj_priv->gtt_offset &&
644 error->acthd < obj_priv->gtt_offset + obj->size)
645 batchbuffer[1] = obj;
646
647 if (batchbuffer[0] && batchbuffer[1])
648 break;
649 }
650 }
651 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
652 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
653 struct drm_gem_object *obj = &obj_priv->base;
654
655 if (batchbuffer[0] == NULL &&
656 bbaddr >= obj_priv->gtt_offset &&
657 bbaddr < obj_priv->gtt_offset + obj->size)
658 batchbuffer[0] = obj;
659
660 if (batchbuffer[1] == NULL &&
661 error->acthd >= obj_priv->gtt_offset &&
662 error->acthd < obj_priv->gtt_offset + obj->size)
663 batchbuffer[1] = obj;
664
665 if (batchbuffer[0] && batchbuffer[1])
666 break;
667 }
668 }
621 669
622 /* We need to copy these to an anonymous buffer as the simplest 670 /* We need to copy these to an anonymous buffer as the simplest
 623 * method to avoid being overwritten by userspace. 671
624 */ 672 */
625 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); 673 error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
626 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); 674 if (batchbuffer[1] != batchbuffer[0])
675 error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
676 else
677 error->batchbuffer[1] = NULL;
627 678
628 /* Record the ringbuffer */ 679 /* Record the ringbuffer */
629 error->ringbuffer = i915_error_object_create(dev, 680 error->ringbuffer = i915_error_object_create(dev,
@@ -667,6 +718,8 @@ static void i915_capture_error_state(struct drm_device *dev)
667 718
668 do_gettimeofday(&error->time); 719 do_gettimeofday(&error->time);
669 720
721 error->overlay = intel_overlay_capture_error_state(dev);
722
670 spin_lock_irqsave(&dev_priv->error_lock, flags); 723 spin_lock_irqsave(&dev_priv->error_lock, flags);
671 if (dev_priv->first_error == NULL) { 724 if (dev_priv->first_error == NULL) {
672 dev_priv->first_error = error; 725 dev_priv->first_error = error;
@@ -834,6 +887,49 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
834 queue_work(dev_priv->wq, &dev_priv->error_work); 887 queue_work(dev_priv->wq, &dev_priv->error_work);
835} 888}
836 889
890static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
891{
892 drm_i915_private_t *dev_priv = dev->dev_private;
893 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
895 struct drm_i915_gem_object *obj_priv;
896 struct intel_unpin_work *work;
897 unsigned long flags;
898 bool stall_detected;
899
900 /* Ignore early vblank irqs */
901 if (intel_crtc == NULL)
902 return;
903
904 spin_lock_irqsave(&dev->event_lock, flags);
905 work = intel_crtc->unpin_work;
906
907 if (work == NULL || work->pending || !work->enable_stall_check) {
908 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
909 spin_unlock_irqrestore(&dev->event_lock, flags);
910 return;
911 }
912
913 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
914 obj_priv = to_intel_bo(work->pending_flip_obj);
 915 if (IS_I965G(dev)) {
916 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
917 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
918 } else {
919 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
920 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
921 crtc->y * crtc->fb->pitch +
922 crtc->x * crtc->fb->bits_per_pixel/8);
923 }
924
925 spin_unlock_irqrestore(&dev->event_lock, flags);
926
927 if (stall_detected) {
928 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
929 intel_prepare_page_flip(dev, intel_crtc->plane);
930 }
931}
932
837irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 933irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
838{ 934{
839 struct drm_device *dev = (struct drm_device *) arg; 935 struct drm_device *dev = (struct drm_device *) arg;
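
Editor's note: the stall check compares the plane's current scanout address with where the pending flip should have put it. On pre-965 hardware that address is a linear byte offset; for example, with gtt_offset = 0x100000, pitch = 8192 bytes, 32 bpp and a panning origin of (x, y) = (0, 2):

    u32 expected = obj_priv->gtt_offset
                 + crtc->y * crtc->fb->pitch                  /* 2 * 8192 */
                 + crtc->x * crtc->fb->bits_per_pixel / 8;    /* 0 * 4    */
    /* expected == 0x104000; a DSPADDR readback equal to this means the
     * flip completed but its interrupt was never delivered. */

On 965+ the surface base register (DSPASURF/DSPBSURF) latches the page-aligned base directly, so a plain equality test against gtt_offset suffices.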
@@ -951,15 +1047,19 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
951 if (pipea_stats & vblank_status) { 1047 if (pipea_stats & vblank_status) {
952 vblank++; 1048 vblank++;
953 drm_handle_vblank(dev, 0); 1049 drm_handle_vblank(dev, 0);
954 if (!dev_priv->flip_pending_is_done) 1050 if (!dev_priv->flip_pending_is_done) {
1051 i915_pageflip_stall_check(dev, 0);
955 intel_finish_page_flip(dev, 0); 1052 intel_finish_page_flip(dev, 0);
1053 }
956 } 1054 }
957 1055
958 if (pipeb_stats & vblank_status) { 1056 if (pipeb_stats & vblank_status) {
959 vblank++; 1057 vblank++;
960 drm_handle_vblank(dev, 1); 1058 drm_handle_vblank(dev, 1);
961 if (!dev_priv->flip_pending_is_done) 1059 if (!dev_priv->flip_pending_is_done) {
1060 i915_pageflip_stall_check(dev, 1);
962 intel_finish_page_flip(dev, 1); 1061 intel_finish_page_flip(dev, 1);
1062 }
963 } 1063 }
964 1064
965 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || 1065 if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
@@ -1251,6 +1351,16 @@ void i915_hangcheck_elapsed(unsigned long data)
1251 &dev_priv->render_ring), 1351 &dev_priv->render_ring),
1252 i915_get_tail_request(dev)->seqno)) { 1352 i915_get_tail_request(dev)->seqno)) {
1253 dev_priv->hangcheck_count = 0; 1353 dev_priv->hangcheck_count = 0;
1354
1355 /* Issue a wake-up to catch stuck h/w. */
 1356 if (dev_priv->render_ring.waiting_gem_seqno ||
 1357 dev_priv->bsd_ring.waiting_gem_seqno) {
1358 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1359 if (dev_priv->render_ring.waiting_gem_seqno)
1360 DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
1361 if (dev_priv->bsd_ring.waiting_gem_seqno)
1362 DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
1363 }
1254 return; 1364 return;
1255 } 1365 }
1256 1366
@@ -1318,12 +1428,17 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1318 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1428 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
1319 (void) I915_READ(DEIER); 1429 (void) I915_READ(DEIER);
1320 1430
1321 /* user interrupt should be enabled, but masked initial */ 1431 /* Gen6 only needs render pipe_control now */
1432 if (IS_GEN6(dev))
1433 render_mask = GT_PIPE_NOTIFY;
1434
1322 dev_priv->gt_irq_mask_reg = ~render_mask; 1435 dev_priv->gt_irq_mask_reg = ~render_mask;
1323 dev_priv->gt_irq_enable_reg = render_mask; 1436 dev_priv->gt_irq_enable_reg = render_mask;
1324 1437
1325 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1438 I915_WRITE(GTIIR, I915_READ(GTIIR));
1326 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1439 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
1440 if (IS_GEN6(dev))
1441 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
1327 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1442 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1328 (void) I915_READ(GTIER); 1443 (void) I915_READ(GTIER);
1329 1444
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index d1bf92b99788..ea5d3fea4b61 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -114,10 +114,6 @@ struct opregion_asle {
114#define ASLE_REQ_MSK 0xf 114#define ASLE_REQ_MSK 0xf
115 115
116/* response bits of ASLE irq request */ 116/* response bits of ASLE irq request */
117#define ASLE_ALS_ILLUM_FAIL (2<<10)
118#define ASLE_BACKLIGHT_FAIL (2<<12)
119#define ASLE_PFIT_FAIL (2<<14)
120#define ASLE_PWM_FREQ_FAIL (2<<16)
121#define ASLE_ALS_ILLUM_FAILED (1<<10) 117#define ASLE_ALS_ILLUM_FAILED (1<<10)
122#define ASLE_BACKLIGHT_FAILED (1<<12) 118#define ASLE_BACKLIGHT_FAILED (1<<12)
123#define ASLE_PFIT_FAILED (1<<14) 119#define ASLE_PFIT_FAILED (1<<14)
@@ -155,11 +151,11 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
155 u32 max_backlight, level, shift; 151 u32 max_backlight, level, shift;
156 152
157 if (!(bclp & ASLE_BCLP_VALID)) 153 if (!(bclp & ASLE_BCLP_VALID))
158 return ASLE_BACKLIGHT_FAIL; 154 return ASLE_BACKLIGHT_FAILED;
159 155
160 bclp &= ASLE_BCLP_MSK; 156 bclp &= ASLE_BCLP_MSK;
161 if (bclp < 0 || bclp > 255) 157 if (bclp < 0 || bclp > 255)
162 return ASLE_BACKLIGHT_FAIL; 158 return ASLE_BACKLIGHT_FAILED;
163 159
164 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 160 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
165 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 161 blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
@@ -211,7 +207,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
211 /* Panel fitting is currently controlled by the X code, so this is a 207 /* Panel fitting is currently controlled by the X code, so this is a
212 noop until modesetting support works fully */ 208 noop until modesetting support works fully */
213 if (!(pfit & ASLE_PFIT_VALID)) 209 if (!(pfit & ASLE_PFIT_VALID))
214 return ASLE_PFIT_FAIL; 210 return ASLE_PFIT_FAILED;
215 return 0; 211 return 0;
216} 212}
217 213
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 281db6e5403a..d094e9129223 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -170,6 +170,7 @@
170#define MI_NO_WRITE_FLUSH (1 << 2) 170#define MI_NO_WRITE_FLUSH (1 << 2)
171#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ 171#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
172#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 172#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
173#define MI_INVALIDATE_ISP (1 << 5) /* invalidate indirect state pointers */
173#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 174#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
174#define MI_REPORT_HEAD MI_INSTR(0x07, 0) 175#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
175#define MI_OVERLAY_FLIP MI_INSTR(0x11,0) 176#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
@@ -180,6 +181,12 @@
180#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 181#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
181#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 182#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
182#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 183#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
184#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
185#define MI_MM_SPACE_GTT (1<<8)
186#define MI_MM_SPACE_PHYSICAL (0<<8)
187#define MI_SAVE_EXT_STATE_EN (1<<3)
188#define MI_RESTORE_EXT_STATE_EN (1<<2)
189#define MI_RESTORE_INHIBIT (1<<0)
183#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 190#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
184#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 191#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
185#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 192#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -312,6 +319,7 @@
312 319
313#define MI_MODE 0x0209c 320#define MI_MODE 0x0209c
314# define VS_TIMER_DISPATCH (1 << 6) 321# define VS_TIMER_DISPATCH (1 << 6)
322# define MI_FLUSH_ENABLE (1 << 11)
315 323
316#define SCPD0 0x0209c /* 915+ only */ 324#define SCPD0 0x0209c /* 915+ only */
317#define IER 0x020a0 325#define IER 0x020a0
@@ -1100,6 +1108,11 @@
1100#define PEG_BAND_GAP_DATA 0x14d68 1108#define PEG_BAND_GAP_DATA 0x14d68
1101 1109
1102/* 1110/*
1111 * Logical Context regs
1112 */
1113#define CCID 0x2180
1114#define CCID_EN (1<<0)
1115/*
1103 * Overlay regs 1116 * Overlay regs
1104 */ 1117 */
1105 1118
@@ -2069,6 +2082,7 @@
2069#define PIPE_DITHER_TYPE_ST01 (1 << 2) 2082#define PIPE_DITHER_TYPE_ST01 (1 << 2)
2070/* Pipe A */ 2083/* Pipe A */
2071#define PIPEADSL 0x70000 2084#define PIPEADSL 0x70000
2085#define DSL_LINEMASK 0x00000fff
2072#define PIPEACONF 0x70008 2086#define PIPEACONF 0x70008
2073#define PIPEACONF_ENABLE (1<<31) 2087#define PIPEACONF_ENABLE (1<<31)
2074#define PIPEACONF_DISABLE 0 2088#define PIPEACONF_DISABLE 0
@@ -2928,6 +2942,7 @@
2928#define TRANS_DP_VSYNC_ACTIVE_LOW 0 2942#define TRANS_DP_VSYNC_ACTIVE_LOW 0
2929#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) 2943#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
2930#define TRANS_DP_HSYNC_ACTIVE_LOW 0 2944#define TRANS_DP_HSYNC_ACTIVE_LOW 0
2945#define TRANS_DP_SYNC_MASK (3<<3)
2931 2946
2932/* SNB eDP training params */ 2947/* SNB eDP training params */
2933/* SNB A-stepping */ 2948/* SNB A-stepping */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6e2025274db5..2c6b98f2440e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg; 35 u32 dpll_reg;
36 36
37 if (IS_IRONLAKE(dev)) { 37 if (HAS_PCH_SPLIT(dev)) {
38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; 38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
39 } else { 39 } else {
40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; 40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
53 if (!i915_pipe_enabled(dev, pipe)) 53 if (!i915_pipe_enabled(dev, pipe))
54 return; 54 return;
55 55
56 if (IS_IRONLAKE(dev)) 56 if (HAS_PCH_SPLIT(dev))
57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
58 58
59 if (pipe == PIPE_A) 59 if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
75 if (!i915_pipe_enabled(dev, pipe)) 75 if (!i915_pipe_enabled(dev, pipe))
76 return; 76 return;
77 77
78 if (IS_IRONLAKE(dev)) 78 if (HAS_PCH_SPLIT(dev))
79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
80 80
81 if (pipe == PIPE_A) 81 if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 240 return;
241 241
242 if (IS_IRONLAKE(dev)) { 242 if (HAS_PCH_SPLIT(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
245 } 245 }
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
247 /* Pipe & plane A info */ 247 /* Pipe & plane A info */
248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
250 if (IS_IRONLAKE(dev)) { 250 if (HAS_PCH_SPLIT(dev)) {
251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0); 251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1); 252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); 253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
256 dev_priv->saveFPA1 = I915_READ(FPA1); 256 dev_priv->saveFPA1 = I915_READ(FPA1);
257 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 257 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
258 } 258 }
259 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 259 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
267 if (!IS_IRONLAKE(dev)) 267 if (!HAS_PCH_SPLIT(dev))
268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
269 269
270 if (IS_IRONLAKE(dev)) { 270 if (HAS_PCH_SPLIT(dev)) {
271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); 271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); 272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); 273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
304 /* Pipe & plane B info */ 304 /* Pipe & plane B info */
305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
307 if (IS_IRONLAKE(dev)) { 307 if (HAS_PCH_SPLIT(dev)) {
308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0); 308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1); 309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); 310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
313 dev_priv->saveFPB1 = I915_READ(FPB1); 313 dev_priv->saveFPB1 = I915_READ(FPB1);
314 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 314 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
315 } 315 }
316 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 316 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
324 if (!IS_IRONLAKE(dev)) 324 if (!HAS_PCH_SPLIT(dev))
325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); 325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
326 326
327 if (IS_IRONLAKE(dev)) { 327 if (HAS_PCH_SPLIT(dev)) {
328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); 328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); 329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); 330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
369 if (drm_core_check_feature(dev, DRIVER_MODESET)) 369 if (drm_core_check_feature(dev, DRIVER_MODESET))
370 return; 370 return;
371 371
372 if (IS_IRONLAKE(dev)) { 372 if (HAS_PCH_SPLIT(dev)) {
373 dpll_a_reg = PCH_DPLL_A; 373 dpll_a_reg = PCH_DPLL_A;
374 dpll_b_reg = PCH_DPLL_B; 374 dpll_b_reg = PCH_DPLL_B;
375 fpa0_reg = PCH_FPA0; 375 fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
385 fpb1_reg = FPB1; 385 fpb1_reg = FPB1;
386 } 386 }
387 387
388 if (IS_IRONLAKE(dev)) { 388 if (HAS_PCH_SPLIT(dev)) {
389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); 389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); 390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
391 } 391 }
@@ -395,16 +395,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
395 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 395 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
396 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & 396 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
397 ~DPLL_VCO_ENABLE); 397 ~DPLL_VCO_ENABLE);
398 DRM_UDELAY(150); 398 POSTING_READ(dpll_a_reg);
399 udelay(150);
399 } 400 }
400 I915_WRITE(fpa0_reg, dev_priv->saveFPA0); 401 I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
401 I915_WRITE(fpa1_reg, dev_priv->saveFPA1); 402 I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
402 /* Actually enable it */ 403 /* Actually enable it */
403 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 404 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
404 DRM_UDELAY(150); 405 POSTING_READ(dpll_a_reg);
405 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 406 udelay(150);
407 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
406 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 408 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
407 DRM_UDELAY(150); 409 POSTING_READ(DPLL_A_MD);
410 }
411 udelay(150);
408 412
409 /* Restore mode */ 413 /* Restore mode */
410 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); 414 I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
@@ -413,10 +417,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
413 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 417 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
414 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 418 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
415 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 419 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
416 if (!IS_IRONLAKE(dev)) 420 if (!HAS_PCH_SPLIT(dev))
417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 421 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
418 422
419 if (IS_IRONLAKE(dev)) { 423 if (HAS_PCH_SPLIT(dev)) {
420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); 424 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); 425 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); 426 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -460,16 +464,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
460 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 464 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
461 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & 465 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
462 ~DPLL_VCO_ENABLE); 466 ~DPLL_VCO_ENABLE);
463 DRM_UDELAY(150); 467 POSTING_READ(dpll_b_reg);
468 udelay(150);
464 } 469 }
465 I915_WRITE(fpb0_reg, dev_priv->saveFPB0); 470 I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
466 I915_WRITE(fpb1_reg, dev_priv->saveFPB1); 471 I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
467 /* Actually enable it */ 472 /* Actually enable it */
468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 473 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
469 DRM_UDELAY(150); 474 POSTING_READ(dpll_b_reg);
470 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 475 udelay(150);
476 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 477 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
472 DRM_UDELAY(150); 478 POSTING_READ(DPLL_B_MD);
479 }
480 udelay(150);
473 481
474 /* Restore mode */ 482 /* Restore mode */
475 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); 483 I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
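
Editor's note: the DRM_UDELAY() calls become POSTING_READ() + udelay() pairs because MMIO writes may be posted: without a read-back, the delay can elapse before the write has even reached the device. The idiom, as applied throughout these hunks:

    I915_WRITE(dpll_reg, val);   /* may sit in a posted-write buffer */
    POSTING_READ(dpll_reg);      /* read back: forces the write to land */
    udelay(150);                 /* now the PLL settle time is honoured */

The same reasoning applies to the DPLL_MD writes, which is why those gained their own posting reads.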
@@ -478,10 +486,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
478 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 486 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
479 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 487 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
480 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 488 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
481 if (!IS_IRONLAKE(dev)) 489 if (!HAS_PCH_SPLIT(dev))
482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 490 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
483 491
484 if (IS_IRONLAKE(dev)) { 492 if (HAS_PCH_SPLIT(dev)) {
485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); 493 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); 494 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); 495 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +554,14 @@ void i915_save_display(struct drm_device *dev)
546 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 554 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
547 555
548 /* CRT state */ 556 /* CRT state */
549 if (IS_IRONLAKE(dev)) { 557 if (HAS_PCH_SPLIT(dev)) {
550 dev_priv->saveADPA = I915_READ(PCH_ADPA); 558 dev_priv->saveADPA = I915_READ(PCH_ADPA);
551 } else { 559 } else {
552 dev_priv->saveADPA = I915_READ(ADPA); 560 dev_priv->saveADPA = I915_READ(ADPA);
553 } 561 }
554 562
555 /* LVDS state */ 563 /* LVDS state */
556 if (IS_IRONLAKE(dev)) { 564 if (HAS_PCH_SPLIT(dev)) {
557 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 565 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
558 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 566 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
559 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 567 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +579,10 @@ void i915_save_display(struct drm_device *dev)
571 dev_priv->saveLVDS = I915_READ(LVDS); 579 dev_priv->saveLVDS = I915_READ(LVDS);
572 } 580 }
573 581
574 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) 582 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
575 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 583 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
576 584
577 if (IS_IRONLAKE(dev)) { 585 if (HAS_PCH_SPLIT(dev)) {
578 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 586 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
579 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 587 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
580 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 588 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -602,7 +610,7 @@ void i915_save_display(struct drm_device *dev)
602 610
603 /* Only save FBC state on the platform that supports FBC */ 611 /* Only save FBC state on the platform that supports FBC */
604 if (I915_HAS_FBC(dev)) { 612 if (I915_HAS_FBC(dev)) {
605 if (IS_IRONLAKE_M(dev)) { 613 if (HAS_PCH_SPLIT(dev)) {
606 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 614 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
607 } else if (IS_GM45(dev)) { 615 } else if (IS_GM45(dev)) {
608 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 616 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
@@ -618,7 +626,7 @@ void i915_save_display(struct drm_device *dev)
618 dev_priv->saveVGA0 = I915_READ(VGA0); 626 dev_priv->saveVGA0 = I915_READ(VGA0);
619 dev_priv->saveVGA1 = I915_READ(VGA1); 627 dev_priv->saveVGA1 = I915_READ(VGA1);
620 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 628 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
621 if (IS_IRONLAKE(dev)) 629 if (HAS_PCH_SPLIT(dev))
622 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); 630 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
623 else 631 else
624 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 632 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -660,24 +668,24 @@ void i915_restore_display(struct drm_device *dev)
660 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
661 669
662 /* CRT state */ 670 /* CRT state */
663 if (IS_IRONLAKE(dev)) 671 if (HAS_PCH_SPLIT(dev))
664 I915_WRITE(PCH_ADPA, dev_priv->saveADPA); 672 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
665 else 673 else
666 I915_WRITE(ADPA, dev_priv->saveADPA); 674 I915_WRITE(ADPA, dev_priv->saveADPA);
667 675
668 /* LVDS state */ 676 /* LVDS state */
669 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) 677 if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
670 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 678 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
671 679
672 if (IS_IRONLAKE(dev)) { 680 if (HAS_PCH_SPLIT(dev)) {
673 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); 681 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
674 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 682 } else if (IS_MOBILE(dev) && !IS_I830(dev))
675 I915_WRITE(LVDS, dev_priv->saveLVDS); 683 I915_WRITE(LVDS, dev_priv->saveLVDS);
676 684
677 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev)) 685 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
678 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 686 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
679 687
680 if (IS_IRONLAKE(dev)) { 688 if (HAS_PCH_SPLIT(dev)) {
681 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); 689 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
682 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); 690 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
683 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); 691 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -708,7 +716,7 @@ void i915_restore_display(struct drm_device *dev)
708 716
709 /* only restore FBC info on the platform that supports FBC*/ 717 /* only restore FBC info on the platform that supports FBC*/
710 if (I915_HAS_FBC(dev)) { 718 if (I915_HAS_FBC(dev)) {
711 if (IS_IRONLAKE_M(dev)) { 719 if (HAS_PCH_SPLIT(dev)) {
712 ironlake_disable_fbc(dev); 720 ironlake_disable_fbc(dev);
713 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 721 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
714 } else if (IS_GM45(dev)) { 722 } else if (IS_GM45(dev)) {
@@ -723,14 +731,15 @@ void i915_restore_display(struct drm_device *dev)
723 } 731 }
724 } 732 }
725 /* VGA state */ 733 /* VGA state */
726 if (IS_IRONLAKE(dev)) 734 if (HAS_PCH_SPLIT(dev))
727 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 735 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
728 else 736 else
729 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 737 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
730 I915_WRITE(VGA0, dev_priv->saveVGA0); 738 I915_WRITE(VGA0, dev_priv->saveVGA0);
731 I915_WRITE(VGA1, dev_priv->saveVGA1); 739 I915_WRITE(VGA1, dev_priv->saveVGA1);
732 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 740 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
733 DRM_UDELAY(150); 741 POSTING_READ(VGA_PD);
742 udelay(150);
734 743
735 i915_restore_vga(dev); 744 i915_restore_vga(dev);
736} 745}
@@ -748,7 +757,7 @@ int i915_save_state(struct drm_device *dev)
748 i915_save_display(dev); 757 i915_save_display(dev);
749 758
750 /* Interrupt state */ 759 /* Interrupt state */
751 if (IS_IRONLAKE(dev)) { 760 if (HAS_PCH_SPLIT(dev)) {
752 dev_priv->saveDEIER = I915_READ(DEIER); 761 dev_priv->saveDEIER = I915_READ(DEIER);
753 dev_priv->saveDEIMR = I915_READ(DEIMR); 762 dev_priv->saveDEIMR = I915_READ(DEIMR);
754 dev_priv->saveGTIER = I915_READ(GTIER); 763 dev_priv->saveGTIER = I915_READ(GTIER);
@@ -762,7 +771,7 @@ int i915_save_state(struct drm_device *dev)
762 dev_priv->saveIMR = I915_READ(IMR); 771 dev_priv->saveIMR = I915_READ(IMR);
763 } 772 }
764 773
765 if (IS_IRONLAKE_M(dev)) 774 if (HAS_PCH_SPLIT(dev))
766 ironlake_disable_drps(dev); 775 ironlake_disable_drps(dev);
767 776
768 /* Cache mode state */ 777 /* Cache mode state */
@@ -820,7 +829,7 @@ int i915_restore_state(struct drm_device *dev)
820 i915_restore_display(dev); 829 i915_restore_display(dev);
821 830
822 /* Interrupt state */ 831 /* Interrupt state */
823 if (IS_IRONLAKE(dev)) { 832 if (HAS_PCH_SPLIT(dev)) {
824 I915_WRITE(DEIER, dev_priv->saveDEIER); 833 I915_WRITE(DEIER, dev_priv->saveDEIER);
825 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 834 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
826 I915_WRITE(GTIER, dev_priv->saveGTIER); 835 I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -835,7 +844,7 @@ int i915_restore_state(struct drm_device *dev)
835 /* Clock gating state */ 844 /* Clock gating state */
836 intel_init_clock_gating(dev); 845 intel_init_clock_gating(dev);
837 846
838 if (IS_IRONLAKE_M(dev)) 847 if (HAS_PCH_SPLIT(dev))
839 ironlake_enable_drps(dev); 848 ironlake_enable_drps(dev);
840 849
841 /* Cache mode state */ 850 /* Cache mode state */
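A recurring change in the i915_suspend.c hunks above: each DRM_UDELAY(150) that followed a PLL write becomes a POSTING_READ() of the same register plus udelay(150), so the posted MMIO write is flushed to the device before the warm-up delay starts counting. The following is a minimal user-space sketch of that write-then-post idiom, not kernel code: mmio_write(), mmio_read() and the regs[] array are hypothetical stand-ins for I915_WRITE/I915_READ/POSTING_READ against a real ioremap()ed register BAR.

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t regs[256];		/* fake register file */

static void mmio_write(unsigned reg, uint32_t val) { regs[reg] = val; }
static uint32_t mmio_read(unsigned reg)            { return regs[reg]; }

static void enable_pll(unsigned dpll_reg, uint32_t val)
{
	mmio_write(dpll_reg, val);
	/* Posting read: forces the write past any write buffer before
	 * the PLL warm-up delay starts counting. */
	(void)mmio_read(dpll_reg);
	/* udelay(150) would go here in the kernel. */
}

int main(void)
{
	enable_pll(0x10, 0x80000000u);
	printf("DPLL = 0x%08x\n", mmio_read(0x10));
	return 0;
}

On real hardware the read-back travels the same bus path as the write, which is what guarantees the ordering; the simulation above only mirrors the calling pattern.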
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ee0732b222a1..4b7735196cd5 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -160,19 +160,20 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 adpa, temp;
 	bool ret;
+	bool turn_off_dac = false;
 
 	temp = adpa = I915_READ(PCH_ADPA);
 
-	if (HAS_PCH_CPT(dev)) {
-		/* Disable DAC before force detect */
-		I915_WRITE(PCH_ADPA, adpa & ~ADPA_DAC_ENABLE);
-		(void)I915_READ(PCH_ADPA);
-	} else {
-		adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-		/* disable HPD first */
-		I915_WRITE(PCH_ADPA, adpa);
-		(void)I915_READ(PCH_ADPA);
-	}
+	if (HAS_PCH_SPLIT(dev))
+		turn_off_dac = true;
+
+	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+	if (turn_off_dac)
+		adpa &= ~ADPA_DAC_ENABLE;
+
+	/* disable HPD first */
+	I915_WRITE(PCH_ADPA, adpa);
+	(void)I915_READ(PCH_ADPA);
 
 	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
 		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
@@ -185,10 +186,11 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
 	I915_WRITE(PCH_ADPA, adpa);
 
-	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
-		;
+	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+		     1000, 1))
+		DRM_ERROR("timed out waiting for FORCE_TRIGGER");
 
-	if (HAS_PCH_CPT(dev)) {
+	if (turn_off_dac) {
 		I915_WRITE(PCH_ADPA, temp);
 		(void)I915_READ(PCH_ADPA);
 	}
@@ -237,17 +239,13 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
 
 	for (i = 0; i < tries ; i++) {
-		unsigned long timeout;
 		/* turn on the FORCE_DETECT */
 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
-		timeout = jiffies + msecs_to_jiffies(1000);
 		/* wait for FORCE_DETECT to go off */
-		do {
-			if (!(I915_READ(PORT_HOTPLUG_EN) &
-			      CRT_HOTPLUG_FORCE_DETECT))
-				break;
-			msleep(1);
-		} while (time_after(timeout, jiffies));
+		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
+			      CRT_HOTPLUG_FORCE_DETECT) == 0,
+			     1000, 1))
+			DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
 	}
 
 	stat = I915_READ(PORT_HOTPLUG_STAT);
@@ -331,7 +329,7 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 	I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 	/* Wait for next Vblank to substitue
 	 * border color for Color info */
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, pipe);
 	st00 = I915_READ8(VGA_MSR_WRITE);
 	status = ((st00 & (1 << 4)) != 0) ?
 			connector_status_connected :
@@ -508,17 +506,8 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	intel_i2c_destroy(intel_encoder->ddc_bus);
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
-	.destroy = intel_crt_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 void intel_crt_init(struct drm_device *dev)
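The intel_crt.c conversions above replace open-coded polling loops — including one unbounded `while (...) ;` spin — with the driver's wait_for(COND, MS, W) macro, which polls a condition until it holds or a timeout expires, optionally sleeping between samples. Below is a rough user-space rendition of such a bounded-poll helper, offered as a sketch rather than the kernel's definition: clock_ms() and the demo flag are invented for illustration, and the in-kernel macro is built on jiffies, time_after() and msleep() instead.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long clock_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Returns 0 on success, -1 on timeout (mirroring wait_for()'s -ETIMEDOUT).
 * Uses a GNU C statement expression, like the kernel macro it imitates. */
#define wait_for(COND, TIMEOUT_MS, SLEEP_MS)				\
({									\
	long __end = clock_ms() + (TIMEOUT_MS);				\
	int __ret = 0;							\
	while (!(COND)) {						\
		if (clock_ms() > __end) { __ret = -1; break; }		\
		if (SLEEP_MS)						\
			usleep((SLEEP_MS) * 1000);			\
	}								\
	__ret;								\
})

int main(void)
{
	volatile bool flag = false;	/* stands in for a status register bit */
	if (wait_for(flag, 50, 1))
		printf("timed out, as expected\n");
	return 0;
}

Returning a nonzero value on timeout lets callers log an error and carry on, as the hunks above do, instead of hanging the modeset path forever.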
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5ec10e02341b..40cc5da264a9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/vgaarb.h>
 #include "drmP.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
@@ -976,14 +977,70 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
 	return true;
 }
 
-void
-intel_wait_for_vblank(struct drm_device *dev)
+/**
+ * intel_wait_for_vblank - wait for vblank on a given pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * Wait for vblank to occur on a given pipe. Needed for various bits of
+ * mode setting code.
+ */
+void intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
-	/* Wait for 20ms, i.e. one cycle at 50hz. */
-	if (in_dbg_master())
-		mdelay(20); /* The kernel debugger cannot call msleep() */
-	else
-		msleep(20);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+
+	/* Clear existing vblank status. Note this will clear any other
+	 * sticky status fields as well.
+	 *
+	 * This races with i915_driver_irq_handler() with the result
+	 * that either function could miss a vblank event. Here it is not
+	 * fatal, as we will either wait upon the next vblank interrupt or
+	 * timeout. Generally speaking intel_wait_for_vblank() is only
+	 * called during modeset at which time the GPU should be idle and
+	 * should *not* be performing page flips and thus not waiting on
+	 * vblanks...
+	 * Currently, the result of us stealing a vblank from the irq
+	 * handler is that a single frame will be skipped during swapbuffers.
+	 */
+	I915_WRITE(pipestat_reg,
+		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
+
+	/* Wait for vblank interrupt bit to set */
+	if (wait_for((I915_READ(pipestat_reg) &
+		      PIPE_VBLANK_INTERRUPT_STATUS),
+		     50, 0))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
+}
+
+/**
+ * intel_wait_for_vblank_off - wait for vblank after disabling a pipe
+ * @dev: drm device
+ * @pipe: pipe to wait for
+ *
+ * After disabling a pipe, we can't wait for vblank in the usual way,
+ * spinning on the vblank interrupt status bit, since we won't actually
+ * see an interrupt when the pipe is disabled.
+ *
+ * So this function waits for the display line value to settle (it
+ * usually ends up stopping at the start of the next frame).
+ */
+void intel_wait_for_vblank_off(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL);
+	unsigned long timeout = jiffies + msecs_to_jiffies(100);
+	u32 last_line;
+
+	/* Wait for the display line to settle */
+	do {
+		last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK;
+		mdelay(5);
+	} while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) &&
+		 time_after(timeout, jiffies));
+
+	if (time_after(jiffies, timeout))
+		DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
 /* Parameters have changed, update FBC info */
@@ -1037,7 +1094,6 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 void i8xx_disable_fbc(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long timeout = jiffies + msecs_to_jiffies(1);
 	u32 fbc_ctl;
 
 	if (!I915_HAS_FBC(dev))
@@ -1052,16 +1108,11 @@ void i8xx_disable_fbc(struct drm_device *dev)
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
 	/* Wait for compressing bit to clear */
-	while (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) {
-		if (time_after(jiffies, timeout)) {
-			DRM_DEBUG_DRIVER("FBC idle timed out\n");
-			break;
-		}
-		; /* do nothing */
+	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 0)) {
+		DRM_DEBUG_KMS("FBC idle timed out\n");
+		return;
 	}
 
-	intel_wait_for_vblank(dev);
-
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
@@ -1118,7 +1169,6 @@ void g4x_disable_fbc(struct drm_device *dev)
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	dpfc_ctl &= ~DPFC_CTL_EN;
 	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
-	intel_wait_for_vblank(dev);
 
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
@@ -1179,7 +1229,6 @@ void ironlake_disable_fbc(struct drm_device *dev)
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	dpfc_ctl &= ~DPFC_CTL_EN;
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
-	intel_wait_for_vblank(dev);
 
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
@@ -1453,7 +1502,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			dspcntr &= ~DISPPLANE_TILED;
 	}
 
-	if (IS_IRONLAKE(dev))
+	if (HAS_PCH_SPLIT(dev))
 		/* must disable */
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
@@ -1462,23 +1511,22 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	Start = obj_priv->gtt_offset;
 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
-	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+		      Start, Offset, x, y, fb->pitch);
 	I915_WRITE(dspstride, fb->pitch);
 	if (IS_I965G(dev)) {
-		I915_WRITE(dspbase, Offset);
-		I915_READ(dspbase);
 		I915_WRITE(dspsurf, Start);
-		I915_READ(dspsurf);
 		I915_WRITE(dsptileoff, (y << 16) | x);
+		I915_WRITE(dspbase, Offset);
 	} else {
 		I915_WRITE(dspbase, Start + Offset);
-		I915_READ(dspbase);
 	}
+	POSTING_READ(dspbase);
 
-	if ((IS_I965G(dev) || plane == 0))
+	if (IS_I965G(dev) || plane == 0)
 		intel_update_fbc(crtc, &crtc->mode);
 
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 	intel_increase_pllclock(crtc, true);
 
 	return 0;
@@ -1489,7 +1537,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_framebuffer *intel_fb;
@@ -1497,13 +1544,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_gem_object *obj;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	unsigned long Start, Offset;
-	int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
-	int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
-	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
-	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
-	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
-	u32 dspcntr;
 	int ret;
 
 	/* no fb bound */
@@ -1539,73 +1579,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	dspcntr = I915_READ(dspcntr_reg);
-	/* Mask out pixel format bits in case we change it */
-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-	switch (crtc->fb->bits_per_pixel) {
-	case 8:
-		dspcntr |= DISPPLANE_8BPP;
-		break;
-	case 16:
-		if (crtc->fb->depth == 15)
-			dspcntr |= DISPPLANE_15_16BPP;
-		else
-			dspcntr |= DISPPLANE_16BPP;
-		break;
-	case 24:
-	case 32:
-		if (crtc->fb->depth == 30)
-			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
-		else
-			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
-		break;
-	default:
-		DRM_ERROR("Unknown color depth\n");
+	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y);
+	if (ret) {
 		i915_gem_object_unpin(obj);
 		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	if (IS_I965G(dev)) {
-		if (obj_priv->tiling_mode != I915_TILING_NONE)
-			dspcntr |= DISPPLANE_TILED;
-		else
-			dspcntr &= ~DISPPLANE_TILED;
-	}
-
-	if (HAS_PCH_SPLIT(dev))
-		/* must disable */
-		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
-	I915_WRITE(dspcntr_reg, dspcntr);
-
-	Start = obj_priv->gtt_offset;
-	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
-	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
-		      Start, Offset, x, y, crtc->fb->pitch);
-	I915_WRITE(dspstride, crtc->fb->pitch);
-	if (IS_I965G(dev)) {
-		I915_WRITE(dspbase, Offset);
-		I915_READ(dspbase);
-		I915_WRITE(dspsurf, Start);
-		I915_READ(dspsurf);
-		I915_WRITE(dsptileoff, (y << 16) | x);
-	} else {
-		I915_WRITE(dspbase, Start + Offset);
-		I915_READ(dspbase);
+		return ret;
 	}
 
-	if ((IS_I965G(dev) || plane == 0))
-		intel_update_fbc(crtc, &crtc->mode);
-
-	intel_wait_for_vblank(dev);
-
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
 		obj_priv = to_intel_bo(intel_fb->obj);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
-	intel_increase_pllclock(crtc, true);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1627,54 +1612,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	return 0;
 }
 
-/* Disable the VGA plane that we never use */
-static void i915_disable_vga (struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u8 sr1;
-	u32 vga_reg;
-
-	if (HAS_PCH_SPLIT(dev))
-		vga_reg = CPU_VGACNTRL;
-	else
-		vga_reg = VGACNTRL;
-
-	if (I915_READ(vga_reg) & VGA_DISP_DISABLE)
-		return;
-
-	I915_WRITE8(VGA_SR_INDEX, 1);
-	sr1 = I915_READ8(VGA_SR_DATA);
-	I915_WRITE8(VGA_SR_DATA, sr1 | (1 << 5));
-	udelay(100);
-
-	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
-}
-
-static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	DRM_DEBUG_KMS("\n");
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl &= ~DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
-}
-
-static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpa_ctl;
-
-	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl |= DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
-	udelay(200);
-}
-
-
 static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1928,9 +1865,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
 	int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
 	int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
-	int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
-	int pf_win_size = (pipe == 0) ? PFA_WIN_SZ : PFB_WIN_SZ;
-	int pf_win_pos = (pipe == 0) ? PFA_WIN_POS : PFB_WIN_POS;
 	int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
 	int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
 	int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
@@ -1945,7 +1879,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
 	int trans_dpll_sel = (pipe == 0) ? 0 : 1;
 	u32 temp;
-	int n;
 	u32 pipe_bpc;
 
 	temp = I915_READ(pipeconf_reg);
@@ -1958,7 +1891,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
 
 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
 			temp = I915_READ(PCH_LVDS);
@@ -1968,10 +1901,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 			}
 		}
 
-		if (HAS_eDP) {
-			/* enable eDP PLL */
-			ironlake_enable_pll_edp(crtc);
-		} else {
+		if (!HAS_eDP) {
 
 			/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 			temp = I915_READ(fdi_rx_reg);
@@ -2003,17 +1933,19 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		}
 
 		/* Enable panel fitting for LVDS */
-		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
-		    || HAS_eDP || intel_pch_has_edp(crtc)) {
-			temp = I915_READ(pf_ctl_reg);
-			I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
-
-			/* currently full aspect */
-			I915_WRITE(pf_win_pos, 0);
-
-			I915_WRITE(pf_win_size,
-				   (dev_priv->panel_fixed_mode->hdisplay << 16) |
-				   (dev_priv->panel_fixed_mode->vdisplay));
+		if (dev_priv->pch_pf_size &&
+		    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+		     || HAS_eDP || intel_pch_has_edp(crtc))) {
+			/* Force use of hard-coded filter coefficients
+			 * as some pre-programmed values are broken,
+			 * e.g. x201.
+			 */
+			I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
+				   PF_ENABLE | PF_FILTER_MED_3x3);
+			I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
+				   dev_priv->pch_pf_pos);
+			I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
+				   dev_priv->pch_pf_size);
 		}
 
 		/* Enable CPU pipe */
@@ -2097,9 +2029,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 			int reg;
 
 			reg = I915_READ(trans_dp_ctl);
-			reg &= ~TRANS_DP_PORT_SEL_MASK;
-			reg = TRANS_DP_OUTPUT_ENABLE |
-			      TRANS_DP_ENH_FRAMING;
+			reg &= ~(TRANS_DP_PORT_SEL_MASK |
+				 TRANS_DP_SYNC_MASK);
+			reg |= (TRANS_DP_OUTPUT_ENABLE |
+				TRANS_DP_ENH_FRAMING);
 
 			if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
 				reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2137,18 +2070,17 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
 		I915_READ(transconf_reg);
 
-		while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
-			;
-
+		if (wait_for(I915_READ(transconf_reg) & TRANS_STATE_ENABLE, 100, 1))
+			DRM_ERROR("failed to enable transcoder\n");
 	}
 
 	intel_crtc_load_lut(crtc);
 
 	intel_update_fbc(crtc, &crtc->mode);
+	break;
 
-	break;
 	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
 
 		drm_vblank_off(dev, pipe);
 		/* Disable display plane */
@@ -2164,40 +2096,22 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		    dev_priv->display.disable_fbc)
 			dev_priv->display.disable_fbc(dev);
 
-		i915_disable_vga(dev);
-
 		/* disable cpu pipe, disable after all planes disabled */
 		temp = I915_READ(pipeconf_reg);
 		if ((temp & PIPEACONF_ENABLE) != 0) {
 			I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
-			I915_READ(pipeconf_reg);
-			n = 0;
+
 			/* wait for cpu pipe off, pipe state */
-			while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0) {
-				n++;
-				if (n < 60) {
-					udelay(500);
-					continue;
-				} else {
-					DRM_DEBUG_KMS("pipe %d off delay\n",
-						      pipe);
-					break;
-				}
-			}
+			if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, 50, 1))
+				DRM_ERROR("failed to turn off cpu pipe\n");
 		} else
 			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
 
 		udelay(100);
 
 		/* Disable PF */
-		temp = I915_READ(pf_ctl_reg);
-		if ((temp & PF_ENABLE) != 0) {
-			I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
-			I915_READ(pf_ctl_reg);
-		}
-		I915_WRITE(pf_win_size, 0);
-		POSTING_READ(pf_win_size);
-
+		I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
+		I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
 
 		/* disable CPU FDI tx and PCH FDI rx */
 		temp = I915_READ(fdi_tx_reg);
@@ -2244,20 +2158,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		temp = I915_READ(transconf_reg);
 		if ((temp & TRANS_ENABLE) != 0) {
 			I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
-			I915_READ(transconf_reg);
-			n = 0;
+
 			/* wait for PCH transcoder off, transcoder state */
-			while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0) {
-				n++;
-				if (n < 60) {
-					udelay(500);
-					continue;
-				} else {
-					DRM_DEBUG_KMS("transcoder %d off "
-						      "delay\n", pipe);
-					break;
-				}
-			}
+			if (wait_for((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0, 50, 1))
+				DRM_ERROR("failed to disable transcoder\n");
 		}
 
 		temp = I915_READ(transconf_reg);
@@ -2294,10 +2198,6 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
 		I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
 		I915_READ(pch_dpll_reg);
 
-		if (HAS_eDP) {
-			ironlake_disable_pll_edp(crtc);
-		}
-
 		/* Switch from PCDclk to Rawclk */
 		temp = I915_READ(fdi_rx_reg);
 		temp &= ~FDI_SEL_PCDCLK;
@@ -2372,8 +2272,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_ON:
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
-		intel_update_watermarks(dev);
-
 		/* Enable the DPLL */
 		temp = I915_READ(dpll_reg);
 		if ((temp & DPLL_VCO_ENABLE) == 0) {
@@ -2413,8 +2311,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 		intel_crtc_dpms_overlay(intel_crtc, true);
 		break;
 	case DRM_MODE_DPMS_OFF:
-		intel_update_watermarks(dev);
-
 		/* Give the overlay scaler a chance to disable if it's on this pipe */
 		intel_crtc_dpms_overlay(intel_crtc, false);
 		drm_vblank_off(dev, pipe);
@@ -2423,9 +2319,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 		    dev_priv->display.disable_fbc)
 			dev_priv->display.disable_fbc(dev);
 
-		/* Disable the VGA plane that we never use */
-		i915_disable_vga(dev);
-
 		/* Disable display plane */
 		temp = I915_READ(dspcntr_reg);
 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2435,10 +2328,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 			I915_READ(dspbase_reg);
 		}
 
-		if (!IS_I9XX(dev)) {
-			/* Wait for vblank for the disable to take effect */
-			intel_wait_for_vblank(dev);
-		}
+		/* Wait for vblank for the disable to take effect */
+		intel_wait_for_vblank_off(dev, pipe);
 
 		/* Don't disable pipe A or pipe A PLLs if needed */
 		if (pipeconf_reg == PIPEACONF &&
@@ -2453,7 +2344,7 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 		}
 
 		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank_off(dev, pipe);
 
 		temp = I915_READ(dpll_reg);
 		if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -2469,9 +2360,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
 
 /**
  * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
  */
 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
@@ -2482,9 +2370,29 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	int pipe = intel_crtc->pipe;
 	bool enabled;
 
-	dev_priv->display.dpms(crtc, mode);
+	if (intel_crtc->dpms_mode == mode)
+		return;
 
 	intel_crtc->dpms_mode = mode;
+	intel_crtc->cursor_on = mode == DRM_MODE_DPMS_ON;
+
+	/* When switching on the display, ensure that SR is disabled
+	 * with multiple pipes prior to enabling to new pipe.
+	 *
+	 * When switching off the display, make sure the cursor is
+	 * properly hidden prior to disabling the pipe.
+	 */
+	if (mode == DRM_MODE_DPMS_ON)
+		intel_update_watermarks(dev);
+	else
+		intel_crtc_update_cursor(crtc);
+
+	dev_priv->display.dpms(crtc, mode);
+
+	if (mode == DRM_MODE_DPMS_ON)
+		intel_crtc_update_cursor(crtc);
+	else
+		intel_update_watermarks(dev);
 
 	if (!dev->primary->master)
 		return;
@@ -2536,6 +2444,20 @@ void intel_encoder_commit (struct drm_encoder *encoder)
 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 }
 
+void intel_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
+
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
+}
+
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
@@ -2867,7 +2789,7 @@ struct cxsr_latency {
 	unsigned long cursor_hpll_disable;
 };
 
-static struct cxsr_latency cxsr_latency_table[] = {
+static const struct cxsr_latency cxsr_latency_table[] = {
 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
 	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
@@ -2905,11 +2827,13 @@ static struct cxsr_latency cxsr_latency_table[] = {
 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
-						   int fsb, int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
+							 int is_ddr3,
+							 int fsb,
+							 int mem)
 {
+	const struct cxsr_latency *latency;
 	int i;
-	struct cxsr_latency *latency;
 
 	if (fsb == 0 || mem == 0)
 		return NULL;
@@ -2930,13 +2854,9 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int is_ddr3,
 static void pineview_disable_cxsr(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg;
 
 	/* deactivate cxsr */
-	reg = I915_READ(DSPFW3);
-	reg &= ~(PINEVIEW_SELF_REFRESH_EN);
-	I915_WRITE(DSPFW3, reg);
-	DRM_INFO("Big FIFO is disabled\n");
+	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
 }
 
 /*
@@ -3024,12 +2944,12 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
 			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct cxsr_latency *latency;
 	u32 reg;
 	unsigned long wm;
-	struct cxsr_latency *latency;
 	int sr_clock;
 
 	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
 					 dev_priv->fsb_freq, dev_priv->mem_freq);
 	if (!latency) {
 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
@@ -3075,9 +2995,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
 		/* activate cxsr */
-		reg = I915_READ(DSPFW3);
-		reg |= PINEVIEW_SELF_REFRESH_EN;
-		I915_WRITE(DSPFW3, reg);
+		I915_WRITE(DSPFW3,
+			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
 		DRM_DEBUG_KMS("Self-refresh is enabled\n");
 	} else {
 		pineview_disable_cxsr(dev);
@@ -3354,12 +3273,11 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
 	int line_count;
 	int planea_htotal = 0, planeb_htotal = 0;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	/* Need htotal for all active display plane */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		if (crtc->enabled) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
 			if (intel_crtc->plane == 0)
 				planea_htotal = crtc->mode.htotal;
 			else
@@ -3519,7 +3437,6 @@ static void intel_update_watermarks(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 	int sr_hdisplay = 0;
 	unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
 	int enabled = 0, pixel_size = 0;
@@ -3530,8 +3447,8 @@ static void intel_update_watermarks(struct drm_device *dev)
 
 	/* Get the clock config from both planes */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		if (crtc->enabled) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		if (intel_crtc->dpms_mode == DRM_MODE_DPMS_ON) {
 			enabled++;
 			if (intel_crtc->plane == 0) {
 				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
@@ -3589,10 +3506,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	bool is_edp = false;
+	struct intel_encoder *has_edp_encoder = NULL;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_encoder *encoder;
-	struct intel_encoder *intel_encoder = NULL;
 	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
@@ -3613,12 +3529,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_vblank_pre_modeset(dev, pipe);
 
 	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+		struct intel_encoder *intel_encoder;
 
-		if (!encoder || encoder->crtc != crtc)
+		if (encoder->crtc != crtc)
 			continue;
 
 		intel_encoder = enc_to_intel_encoder(encoder);
-
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			is_lvds = true;
@@ -3642,7 +3558,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			is_dp = true;
 			break;
 		case INTEL_OUTPUT_EDP:
-			is_edp = true;
+			has_edp_encoder = intel_encoder;
 			break;
 		}
 
@@ -3720,10 +3636,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		int lane = 0, link_bw, bpp;
 		/* eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
-		if (is_edp) {
+		if (has_edp_encoder) {
 			target_clock = mode->clock;
-			intel_edp_link_config(intel_encoder,
+			intel_edp_link_config(has_edp_encoder,
 					      &lane, &link_bw);
 		} else {
 			/* DP over FDI requires target mode clock
 			   instead of link clock */
@@ -3744,7 +3660,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 				temp |= PIPE_8BPC;
 			else
 				temp |= PIPE_6BPC;
-		} else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
+		} else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) {
 			switch (dev_priv->edp_bpp/3) {
 			case 8:
 				temp |= PIPE_8BPC;
@@ -3817,7 +3733,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 
 		udelay(200);
 
-		if (is_edp) {
+		if (has_edp_encoder) {
 			if (dev_priv->lvds_use_ssc) {
 				temp |= DREF_SSC1_ENABLE;
 				I915_WRITE(PCH_DREF_CONTROL, temp);
@@ -3966,9 +3882,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		dpll_reg = pch_dpll_reg;
 	}
 
-	if (is_edp) {
-		ironlake_disable_pll_edp(crtc);
-	} else if ((dpll & DPLL_VCO_ENABLE)) {
+	if (!has_edp_encoder) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
 		I915_READ(dpll_reg);
@@ -4063,7 +3977,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (!is_edp) {
+	if (!has_edp_encoder) {
 		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll);
 		I915_READ(dpll_reg);
@@ -4142,7 +4056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(link_m1_reg, m_n.link_m);
 	I915_WRITE(link_n1_reg, m_n.link_n);
 
-	if (is_edp) {
+	if (has_edp_encoder) {
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 	} else {
 		/* enable FDI RX PLL too */
@@ -4167,7 +4081,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(pipeconf_reg, pipeconf);
 	I915_READ(pipeconf_reg);
 
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, pipe);
 
 	if (IS_IRONLAKE(dev)) {
 		/* enable address swizzle for tiling buffer */
@@ -4180,9 +4094,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* Flush the plane changes */
 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
 
-	if ((IS_I965G(dev) || plane == 0))
-		intel_update_fbc(crtc, &crtc->mode);
-
 	intel_update_watermarks(dev);
 
 	drm_vblank_post_modeset(dev, pipe);
@@ -4216,6 +4127,62 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
 	}
 }
 
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	bool visible = base != 0;
+	u32 cntl;
+
+	if (intel_crtc->cursor_visible == visible)
+		return;
+
+	cntl = I915_READ(CURACNTR);
+	if (visible) {
+		/* On these chipsets we can only modify the base whilst
+		 * the cursor is disabled.
+		 */
+		I915_WRITE(CURABASE, base);
+
+		cntl &= ~(CURSOR_FORMAT_MASK);
+		/* XXX width must be 64, stride 256 => 0x00 << 28 */
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB;
+	} else
+		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+	I915_WRITE(CURACNTR, cntl);
+
+	intel_crtc->cursor_visible = visible;
+}
+
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	bool visible = base != 0;
+
+	if (intel_crtc->cursor_visible != visible) {
+		uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+		if (base) {
+			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+			cntl |= pipe << 28; /* Connect to correct pipe */
+		} else {
+			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+			cntl |= CURSOR_MODE_DISABLE;
+		}
+		I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+		intel_crtc->cursor_visible = visible;
+	}
+	/* and commit changes on next vblank */
+	I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+}
+
 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
 static void intel_crtc_update_cursor(struct drm_crtc *crtc)
 {
@@ -4225,12 +4192,12 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
 	int pipe = intel_crtc->pipe;
 	int x = intel_crtc->cursor_x;
 	int y = intel_crtc->cursor_y;
-	uint32_t base, pos;
+	u32 base, pos;
 	bool visible;
 
 	pos = 0;
 
-	if (crtc->fb) {
+	if (intel_crtc->cursor_on && crtc->fb) {
 		base = intel_crtc->cursor_addr;
 		if (x > (int) crtc->fb->width)
 			base = 0;
@@ -4259,37 +4226,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc)
4259 pos |= y << CURSOR_Y_SHIFT; 4226 pos |= y << CURSOR_Y_SHIFT;
4260 4227
4261 visible = base != 0; 4228 visible = base != 0;
4262 if (!visible && !intel_crtc->cursor_visble) 4229 if (!visible && !intel_crtc->cursor_visible)
4263 return; 4230 return;
4264 4231
4265 I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); 4232 I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
4266 if (intel_crtc->cursor_visble != visible) { 4233 if (IS_845G(dev) || IS_I865G(dev))
4267 uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); 4234 i845_update_cursor(crtc, base);
4268 if (base) { 4235 else
4269 /* Hooray for CUR*CNTR differences */ 4236 i9xx_update_cursor(crtc, base);
4270 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4271 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4272 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4273 cntl |= pipe << 28; /* Connect to correct pipe */
4274 } else {
4275 cntl &= ~(CURSOR_FORMAT_MASK);
4276 cntl |= CURSOR_ENABLE;
4277 cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
4278 }
4279 } else {
4280 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4281 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4282 cntl |= CURSOR_MODE_DISABLE;
4283 } else {
4284 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
4285 }
4286 }
4287 I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
4288
4289 intel_crtc->cursor_visble = visible;
4290 }
4291 /* and commit changes on next vblank */
4292 I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
4293 4237
4294 if (visible) 4238 if (visible)
4295 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); 4239 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
@@ -4354,8 +4298,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4354 4298
4355 addr = obj_priv->gtt_offset; 4299 addr = obj_priv->gtt_offset;
4356 } else { 4300 } else {
4301 int align = IS_I830(dev) ? 16 * 1024 : 256;
4357 ret = i915_gem_attach_phys_object(dev, bo, 4302 ret = i915_gem_attach_phys_object(dev, bo,
4358 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); 4303 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
4304 align);
4359 if (ret) { 4305 if (ret) {
4360 DRM_ERROR("failed to attach phys object\n"); 4306 DRM_ERROR("failed to attach phys object\n");
4361 goto fail_locked; 4307 goto fail_locked;
@@ -4544,7 +4490,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
4544 encoder_funcs->commit(encoder); 4490 encoder_funcs->commit(encoder);
4545 } 4491 }
4546 /* let the connector get through one full cycle before testing */ 4492 /* let the connector get through one full cycle before testing */
4547 intel_wait_for_vblank(dev); 4493 intel_wait_for_vblank(dev, intel_crtc->pipe);
4548 4494
4549 return crtc; 4495 return crtc;
4550} 4496}
@@ -4749,7 +4695,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
4749 dpll &= ~DISPLAY_RATE_SELECT_FPA1; 4695 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
4750 I915_WRITE(dpll_reg, dpll); 4696 I915_WRITE(dpll_reg, dpll);
4751 dpll = I915_READ(dpll_reg); 4697 dpll = I915_READ(dpll_reg);
4752 intel_wait_for_vblank(dev); 4698 intel_wait_for_vblank(dev, pipe);
4753 dpll = I915_READ(dpll_reg); 4699 dpll = I915_READ(dpll_reg);
4754 if (dpll & DISPLAY_RATE_SELECT_FPA1) 4700 if (dpll & DISPLAY_RATE_SELECT_FPA1)
4755 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); 4701 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4793,7 +4739,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
4793 dpll |= DISPLAY_RATE_SELECT_FPA1; 4739 dpll |= DISPLAY_RATE_SELECT_FPA1;
4794 I915_WRITE(dpll_reg, dpll); 4740 I915_WRITE(dpll_reg, dpll);
4795 dpll = I915_READ(dpll_reg); 4741 dpll = I915_READ(dpll_reg);
4796 intel_wait_for_vblank(dev); 4742 intel_wait_for_vblank(dev, pipe);
4797 dpll = I915_READ(dpll_reg); 4743 dpll = I915_READ(dpll_reg);
4798 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 4744 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
4799 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 4745 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
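Annotation: both PLL hunks follow the same write-then-verify discipline: set or clear DISPLAY_RATE_SELECT_FPA1, wait one vblank on the right pipe, then re-read the DPLL register to confirm the divider actually latched. A stub-register sketch of that pattern (the bit position and register behaviour are faked for the demo):

#include <stdint.h>
#include <stdio.h>

#define DISPLAY_RATE_SELECT_FPA1 (1u << 24)   /* illustrative bit position */

static uint32_t fake_dpll;                    /* stands in for the MMIO register */
static uint32_t reg_read(void)        { return fake_dpll; }
static void     reg_write(uint32_t v) { fake_dpll = v; }
static void     wait_one_vblank(void) { /* elided in this sketch */ }

/* Returns 0 if the divider latched, -1 if the hardware ignored the write. */
static int lvds_set_fpa1(int enable)
{
	uint32_t dpll = reg_read();

	if (enable)
		dpll |= DISPLAY_RATE_SELECT_FPA1;
	else
		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
	reg_write(dpll);

	(void)reg_read();          /* post the write */
	wait_one_vblank();         /* divider takes effect on the next frame */

	dpll = reg_read();
	if (!!(dpll & DISPLAY_RATE_SELECT_FPA1) != !!enable)
		return -1;         /* the "failed to up/downclock LVDS" case */
	return 0;
}

int main(void)
{
	printf("downclock: %s\n", lvds_set_fpa1(1) ? "failed" : "ok");
	return 0;
}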
@@ -4916,15 +4862,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
4916 kfree(intel_crtc); 4862 kfree(intel_crtc);
4917} 4863}
4918 4864
4919struct intel_unpin_work {
4920 struct work_struct work;
4921 struct drm_device *dev;
4922 struct drm_gem_object *old_fb_obj;
4923 struct drm_gem_object *pending_flip_obj;
4924 struct drm_pending_vblank_event *event;
4925 int pending;
4926};
4927
4928static void intel_unpin_work_fn(struct work_struct *__work) 4865static void intel_unpin_work_fn(struct work_struct *__work)
4929{ 4866{
4930 struct intel_unpin_work *work = 4867 struct intel_unpin_work *work =
@@ -5012,7 +4949,8 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
5012 4949
5013 spin_lock_irqsave(&dev->event_lock, flags); 4950 spin_lock_irqsave(&dev->event_lock, flags);
5014 if (intel_crtc->unpin_work) { 4951 if (intel_crtc->unpin_work) {
5015 intel_crtc->unpin_work->pending = 1; 4952 if ((++intel_crtc->unpin_work->pending) > 1)
4953 DRM_ERROR("Prepared flip multiple times\n");
5016 } else { 4954 } else {
5017 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); 4955 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5018 } 4956 }
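Annotation: switching unpin_work->pending from a flag assignment to a pre-increment lets the flip-pending interrupt detect the same work item being prepared twice instead of silently absorbing it. In the driver the counter is a plain int guarded by dev->event_lock; a minimal model of the check:

#include <stdio.h>

struct unpin_work { int pending; };

/* Called from the flip-pending interrupt path; a second call for the
 * same work item now trips an error rather than being lost. */
static void prepare_flip(struct unpin_work *work)
{
	if (++work->pending > 1)
		fprintf(stderr, "Prepared flip multiple times\n");
}

int main(void)
{
	struct unpin_work w = { 0 };
	prepare_flip(&w);   /* ok */
	prepare_flip(&w);   /* logs the error */
	return 0;
}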
@@ -5031,9 +4969,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5031 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4969 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5032 struct intel_unpin_work *work; 4970 struct intel_unpin_work *work;
5033 unsigned long flags, offset; 4971 unsigned long flags, offset;
5034 int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; 4972 int pipe = intel_crtc->pipe;
5035 int ret, pipesrc; 4973 u32 pf, pipesrc;
5036 u32 flip_mask; 4974 int ret;
5037 4975
5038 work = kzalloc(sizeof *work, GFP_KERNEL); 4976 work = kzalloc(sizeof *work, GFP_KERNEL);
5039 if (work == NULL) 4977 if (work == NULL)
@@ -5082,34 +5020,73 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5082 atomic_inc(&obj_priv->pending_flip); 5020 atomic_inc(&obj_priv->pending_flip);
5083 work->pending_flip_obj = obj; 5021 work->pending_flip_obj = obj;
5084 5022
5085 if (intel_crtc->plane) 5023 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5086 flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 5024 u32 flip_mask;
5087 else
5088 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
5089 5025
5090 /* Wait for any previous flip to finish */ 5026 if (intel_crtc->plane)
5091 if (IS_GEN3(dev)) 5027 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5092 while (I915_READ(ISR) & flip_mask) 5028 else
5093 ; 5029 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5030
5031 BEGIN_LP_RING(2);
5032 OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5033 OUT_RING(0);
5034 ADVANCE_LP_RING();
5035 }
5036
5037 work->enable_stall_check = true;
5094 5038
5095 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5039 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5096 offset = obj_priv->gtt_offset; 5040 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
5097 offset += (crtc->y * fb->pitch) + (crtc->x * (fb->bits_per_pixel) / 8);
5098 5041
5099 BEGIN_LP_RING(4); 5042 BEGIN_LP_RING(4);
5100 if (IS_I965G(dev)) { 5043 switch(INTEL_INFO(dev)->gen) {
5044 case 2:
5101 OUT_RING(MI_DISPLAY_FLIP | 5045 OUT_RING(MI_DISPLAY_FLIP |
5102 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5046 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5103 OUT_RING(fb->pitch); 5047 OUT_RING(fb->pitch);
5104 OUT_RING(offset | obj_priv->tiling_mode); 5048 OUT_RING(obj_priv->gtt_offset + offset);
5105 pipesrc = I915_READ(pipesrc_reg); 5049 OUT_RING(MI_NOOP);
5106 OUT_RING(pipesrc & 0x0fff0fff); 5050 break;
5107 } else { 5051
5052 case 3:
5108 OUT_RING(MI_DISPLAY_FLIP_I915 | 5053 OUT_RING(MI_DISPLAY_FLIP_I915 |
5109 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5054 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5110 OUT_RING(fb->pitch); 5055 OUT_RING(fb->pitch);
5111 OUT_RING(offset); 5056 OUT_RING(obj_priv->gtt_offset + offset);
5112 OUT_RING(MI_NOOP); 5057 OUT_RING(MI_NOOP);
5058 break;
5059
5060 case 4:
5061 case 5:
5062 /* i965+ uses the linear or tiled offsets from the
5063 * Display Registers (which do not change across a page-flip)
5064 * so we need only reprogram the base address.
5065 */
5066 OUT_RING(MI_DISPLAY_FLIP |
5067 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5068 OUT_RING(fb->pitch);
5069 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
5070
5071 /* XXX Enabling the panel-fitter across page-flip is so far
5072 * untested on non-native modes, so ignore it for now.
5073 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5074 */
5075 pf = 0;
5076 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5077 OUT_RING(pf | pipesrc);
5078 break;
5079
5080 case 6:
5081 OUT_RING(MI_DISPLAY_FLIP |
5082 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5083 OUT_RING(fb->pitch | obj_priv->tiling_mode);
5084 OUT_RING(obj_priv->gtt_offset);
5085
5086 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5087 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
5088 OUT_RING(pf | pipesrc);
5089 break;
5113 } 5090 }
5114 ADVANCE_LP_RING(); 5091 ADVANCE_LP_RING();
5115 5092
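Annotation: the rewritten hunk emits one four-dword flip packet whose layout varies by generation: gen2/3 reprogram base plus x/y offset each flip, gen4/5 reprogram only the base with the tiling mode in the low bits, and gen6 moves the tiling bit into the pitch dword and appends panel-fitter/pipe-source state. A sketch that fills a dword buffer the same way; the MI_* opcode values are placeholders for the real ones in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

#define MI_DISPLAY_FLIP       0x01000000u   /* placeholder opcodes */
#define MI_DISPLAY_FLIP_I915  0x02000000u
#define MI_NOOP               0x00000000u

struct flip {
	int gen;                /* 2..6, as in INTEL_INFO(dev)->gen */
	uint32_t pitch, gtt_offset, offset, tiling, pf_pipesrc;
};

/* Fill the 4-dword ring packet; mirrors the switch in the hunk above. */
static void emit_flip(const struct flip *f, uint32_t ring[4])
{
	switch (f->gen) {
	case 2:
		ring[0] = MI_DISPLAY_FLIP;
		ring[1] = f->pitch;
		ring[2] = f->gtt_offset + f->offset;   /* base incl. x/y offset */
		ring[3] = MI_NOOP;
		break;
	case 3:
		ring[0] = MI_DISPLAY_FLIP_I915;        /* i915-specific variant */
		ring[1] = f->pitch;
		ring[2] = f->gtt_offset + f->offset;
		ring[3] = MI_NOOP;
		break;
	case 4: case 5:
		ring[0] = MI_DISPLAY_FLIP;
		ring[1] = f->pitch;
		ring[2] = f->gtt_offset | f->tiling;   /* offsets come from regs */
		ring[3] = f->pf_pipesrc;
		break;
	case 6:
		ring[0] = MI_DISPLAY_FLIP;
		ring[1] = f->pitch | f->tiling;        /* tiling moves to pitch */
		ring[2] = f->gtt_offset;
		ring[3] = f->pf_pipesrc;
		break;
	}
}

int main(void)
{
	uint32_t ring[4];
	struct flip f = { 6, 4096, 0x100000, 0, 0, 0x03ff03ff };

	emit_flip(&f, ring);
	printf("%08x %08x %08x %08x\n", ring[0], ring[1], ring[2], ring[3]);
	return 0;
}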
@@ -5190,7 +5167,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5190 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5167 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5191 5168
5192 intel_crtc->cursor_addr = 0; 5169 intel_crtc->cursor_addr = 0;
5193 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 5170 intel_crtc->dpms_mode = -1;
5194 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 5171 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
5195 5172
5196 intel_crtc->busy = false; 5173 intel_crtc->busy = false;
@@ -5432,37 +5409,37 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
5432}; 5409};
5433 5410
5434static struct drm_gem_object * 5411static struct drm_gem_object *
5435intel_alloc_power_context(struct drm_device *dev) 5412intel_alloc_context_page(struct drm_device *dev)
5436{ 5413{
5437 struct drm_gem_object *pwrctx; 5414 struct drm_gem_object *ctx;
5438 int ret; 5415 int ret;
5439 5416
5440 pwrctx = i915_gem_alloc_object(dev, 4096); 5417 ctx = i915_gem_alloc_object(dev, 4096);
5441 if (!pwrctx) { 5418 if (!ctx) {
5442 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 5419 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
5443 return NULL; 5420 return NULL;
5444 } 5421 }
5445 5422
5446 mutex_lock(&dev->struct_mutex); 5423 mutex_lock(&dev->struct_mutex);
5447 ret = i915_gem_object_pin(pwrctx, 4096); 5424 ret = i915_gem_object_pin(ctx, 4096);
5448 if (ret) { 5425 if (ret) {
5449 DRM_ERROR("failed to pin power context: %d\n", ret); 5426 DRM_ERROR("failed to pin power context: %d\n", ret);
5450 goto err_unref; 5427 goto err_unref;
5451 } 5428 }
5452 5429
5453 ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); 5430 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
5454 if (ret) { 5431 if (ret) {
5455 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 5432 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
5456 goto err_unpin; 5433 goto err_unpin;
5457 } 5434 }
5458 mutex_unlock(&dev->struct_mutex); 5435 mutex_unlock(&dev->struct_mutex);
5459 5436
5460 return pwrctx; 5437 return ctx;
5461 5438
5462err_unpin: 5439err_unpin:
5463 i915_gem_object_unpin(pwrctx); 5440 i915_gem_object_unpin(ctx);
5464err_unref: 5441err_unref:
5465 drm_gem_object_unreference(pwrctx); 5442 drm_gem_object_unreference(ctx);
5466 mutex_unlock(&dev->struct_mutex); 5443 mutex_unlock(&dev->struct_mutex);
5467 return NULL; 5444 return NULL;
5468} 5445}
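Annotation: the renamed intel_alloc_context_page() keeps the allocate, pin, set-domain chain with the usual kernel goto unwind, where each failure jumps to a label that undoes exactly the steps that succeeded. The shape of that pattern in self-contained C, with the GEM operations stubbed:

#include <stdio.h>
#include <stdlib.h>

/* Stubs standing in for alloc/pin/set-domain; each can fail. */
static void *obj_alloc(void)          { return malloc(4096); }
static int   obj_pin(void *o)         { (void)o; return 0; }
static int   obj_set_domain(void *o)  { (void)o; return 0; }
static void  obj_unpin(void *o)       { (void)o; }
static void  obj_unref(void *o)       { free(o); }

static void *alloc_context_page(void)
{
	void *ctx = obj_alloc();
	if (!ctx)
		return NULL;

	if (obj_pin(ctx))
		goto err_unref;        /* only the allocation to undo */
	if (obj_set_domain(ctx))
		goto err_unpin;        /* pin succeeded, undo it first */

	return ctx;

err_unpin:
	obj_unpin(ctx);
err_unref:
	obj_unref(ctx);
	return NULL;
}

int main(void)
{
	void *ctx = alloc_context_page();
	printf("ctx %s\n", ctx ? "allocated" : "failed");
	obj_unref(ctx);
	return 0;
}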
@@ -5494,7 +5471,6 @@ void ironlake_enable_drps(struct drm_device *dev)
5494 struct drm_i915_private *dev_priv = dev->dev_private; 5471 struct drm_i915_private *dev_priv = dev->dev_private;
5495 u32 rgvmodectl = I915_READ(MEMMODECTL); 5472 u32 rgvmodectl = I915_READ(MEMMODECTL);
5496 u8 fmax, fmin, fstart, vstart; 5473 u8 fmax, fmin, fstart, vstart;
5497 int i = 0;
5498 5474
5499 /* 100ms RC evaluation intervals */ 5475 /* 100ms RC evaluation intervals */
5500 I915_WRITE(RCUPEI, 100000); 5476 I915_WRITE(RCUPEI, 100000);
@@ -5538,13 +5514,8 @@ void ironlake_enable_drps(struct drm_device *dev)
5538 rgvmodectl |= MEMMODE_SWMODE_EN; 5514 rgvmodectl |= MEMMODE_SWMODE_EN;
5539 I915_WRITE(MEMMODECTL, rgvmodectl); 5515 I915_WRITE(MEMMODECTL, rgvmodectl);
5540 5516
5541 while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { 5517 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 1, 0))
5542 if (i++ > 100) { 5518 DRM_ERROR("stuck trying to change perf mode\n");
5543 DRM_ERROR("stuck trying to change perf mode\n");
5544 break;
5545 }
5546 msleep(1);
5547 }
5548 msleep(1); 5519 msleep(1);
5549 5520
5550 ironlake_set_drps(dev, fstart); 5521 ironlake_set_drps(dev, fstart);
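Annotation: the open-coded "poll MEMSWCTL up to ~100 ms" loop collapses into a single wait_for(cond, timeout, ...) call. A userspace approximation of such a helper using a GCC statement expression and POSIX timing; the driver's macro takes an extra busy-wait parameter and its exact semantics may differ:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Re-evaluate COND every millisecond until it holds or TIMEOUT_MS
 * elapses; yields 0 on success, -1 on timeout. */
#define wait_for(COND, TIMEOUT_MS)					\
({									\
	long __end = now_ms() + (TIMEOUT_MS);				\
	int __ret = 0;							\
	while (!(COND)) {						\
		if (now_ms() > __end) { __ret = -1; break; }		\
		usleep(1000);						\
	}								\
	__ret;								\
})

static volatile int cmd_sts = 1;   /* stands in for MEMSWCTL & MEMCTL_CMD_STS */

int main(void)
{
	cmd_sts = 0;               /* pretend the hardware completed */
	if (wait_for(cmd_sts == 0, 100))
		fprintf(stderr, "stuck trying to change perf mode\n");
	return 0;
}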
@@ -5725,7 +5696,8 @@ void intel_init_clock_gating(struct drm_device *dev)
5725 ILK_DPFC_DIS2 | 5696 ILK_DPFC_DIS2 |
5726 ILK_CLK_FBC); 5697 ILK_CLK_FBC);
5727 } 5698 }
5728 return; 5699 if (IS_GEN6(dev))
5700 return;
5729 } else if (IS_G4X(dev)) { 5701 } else if (IS_G4X(dev)) {
5730 uint32_t dspclk_gate; 5702 uint32_t dspclk_gate;
5731 I915_WRITE(RENCLK_GATE_D1, 0); 5703 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5768,6 +5740,31 @@ void intel_init_clock_gating(struct drm_device *dev)
5768 * GPU can automatically power down the render unit if given a page 5740 * GPU can automatically power down the render unit if given a page
5769 * to save state. 5741 * to save state.
5770 */ 5742 */
5743 if (IS_IRONLAKE_M(dev)) {
5744 if (dev_priv->renderctx == NULL)
5745 dev_priv->renderctx = intel_alloc_context_page(dev);
5746 if (dev_priv->renderctx) {
5747 struct drm_i915_gem_object *obj_priv;
5748 obj_priv = to_intel_bo(dev_priv->renderctx);
5749 if (obj_priv) {
5750 BEGIN_LP_RING(4);
5751 OUT_RING(MI_SET_CONTEXT);
5752 OUT_RING(obj_priv->gtt_offset |
5753 MI_MM_SPACE_GTT |
5754 MI_SAVE_EXT_STATE_EN |
5755 MI_RESTORE_EXT_STATE_EN |
5756 MI_RESTORE_INHIBIT);
5757 OUT_RING(MI_NOOP);
5758 OUT_RING(MI_FLUSH);
5759 ADVANCE_LP_RING();
5760 }
5761 } else {
5762 DRM_DEBUG_KMS("Failed to allocate render context."
5763 "Disable RC6\n");
5764 return;
5765 }
5766 }
5767
5771 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 5768 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
5772 struct drm_i915_gem_object *obj_priv = NULL; 5769 struct drm_i915_gem_object *obj_priv = NULL;
5773 5770
@@ -5776,7 +5773,7 @@ void intel_init_clock_gating(struct drm_device *dev)
5776 } else { 5773 } else {
5777 struct drm_gem_object *pwrctx; 5774 struct drm_gem_object *pwrctx;
5778 5775
5779 pwrctx = intel_alloc_power_context(dev); 5776 pwrctx = intel_alloc_context_page(dev);
5780 if (pwrctx) { 5777 if (pwrctx) {
5781 dev_priv->pwrctx = pwrctx; 5778 dev_priv->pwrctx = pwrctx;
5782 obj_priv = to_intel_bo(pwrctx); 5779 obj_priv = to_intel_bo(pwrctx);
@@ -5948,6 +5945,29 @@ static void intel_init_quirks(struct drm_device *dev)
5948 } 5945 }
5949} 5946}
5950 5947
5948/* Disable the VGA plane that we never use */
5949static void i915_disable_vga(struct drm_device *dev)
5950{
5951 struct drm_i915_private *dev_priv = dev->dev_private;
5952 u8 sr1;
5953 u32 vga_reg;
5954
5955 if (HAS_PCH_SPLIT(dev))
5956 vga_reg = CPU_VGACNTRL;
5957 else
5958 vga_reg = VGACNTRL;
5959
5960 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5961 outb(1, VGA_SR_INDEX);
5962 sr1 = inb(VGA_SR_DATA);
5963 outb(sr1 | 1<<5, VGA_SR_DATA);
5964 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5965 udelay(300);
5966
5967 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
5968 POSTING_READ(vga_reg);
5969}
5970
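Annotation: the new i915_disable_vga() first blanks the screen through the legacy VGA sequencer (SR01 bit 5) while holding the VGA arbiter's legacy-IO lock, then disables the VGA plane proper through VGACNTRL/CPU_VGACNTRL. The sequencer handshake is the classic index/data port pair at 0x3c4/0x3c5; a userspace sketch of just that handshake (x86 Linux, needs ioperm and root to actually run):

#include <stdint.h>
#include <sys/io.h>             /* outb/inb/ioperm; glibc on x86 */

#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA  0x3c5

/* Set bit 5 of sequencer register SR01, which blanks the VGA screen.
 * The driver does this under vga_get_uninterruptible(). */
static void vga_screen_off(void)
{
	uint8_t sr1;

	outb(0x01, VGA_SR_INDEX);      /* select SR01 */
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | (1 << 5), VGA_SR_DATA);
}

int main(void)
{
	if (ioperm(VGA_SR_INDEX, 2, 1))
		return 1;              /* no port access; run as root */
	vga_screen_off();
	return 0;
}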
5951void intel_modeset_init(struct drm_device *dev) 5971void intel_modeset_init(struct drm_device *dev)
5952{ 5972{
5953 struct drm_i915_private *dev_priv = dev->dev_private; 5973 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5996,6 +6016,9 @@ void intel_modeset_init(struct drm_device *dev)
5996 6016
5997 intel_init_clock_gating(dev); 6017 intel_init_clock_gating(dev);
5998 6018
6019 /* Just disable it once at startup */
6020 i915_disable_vga(dev);
6021
5999 if (IS_IRONLAKE_M(dev)) { 6022 if (IS_IRONLAKE_M(dev)) {
6000 ironlake_enable_drps(dev); 6023 ironlake_enable_drps(dev);
6001 intel_init_emon(dev); 6024 intel_init_emon(dev);
@@ -6034,6 +6057,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
6034 if (dev_priv->display.disable_fbc) 6057 if (dev_priv->display.disable_fbc)
6035 dev_priv->display.disable_fbc(dev); 6058 dev_priv->display.disable_fbc(dev);
6036 6059
6060 if (dev_priv->renderctx) {
6061 struct drm_i915_gem_object *obj_priv;
6062
6063 obj_priv = to_intel_bo(dev_priv->renderctx);
6064 I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
6065 I915_READ(CCID);
6066 i915_gem_object_unpin(dev_priv->renderctx);
6067 drm_gem_object_unreference(dev_priv->renderctx);
6068 }
6069
6037 if (dev_priv->pwrctx) { 6070 if (dev_priv->pwrctx) {
6038 struct drm_i915_gem_object *obj_priv; 6071 struct drm_i915_gem_object *obj_priv;
6039 6072
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 40be1fa65be1..51d142939a26 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,10 +42,11 @@
42 42
43#define DP_LINK_CONFIGURATION_SIZE 9 43#define DP_LINK_CONFIGURATION_SIZE 9
44 44
45#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) 45#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP)
46#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) 46#define IS_PCH_eDP(i) ((i)->is_pch_edp)
47 47
48struct intel_dp_priv { 48struct intel_dp {
49 struct intel_encoder base;
49 uint32_t output_reg; 50 uint32_t output_reg;
50 uint32_t DP; 51 uint32_t DP;
51 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 52 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
@@ -54,40 +55,39 @@ struct intel_dp_priv {
54 uint8_t link_bw; 55 uint8_t link_bw;
55 uint8_t lane_count; 56 uint8_t lane_count;
56 uint8_t dpcd[4]; 57 uint8_t dpcd[4];
57 struct intel_encoder *intel_encoder;
58 struct i2c_adapter adapter; 58 struct i2c_adapter adapter;
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61}; 61};
62 62
63static void 63static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
64intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, 64{
65 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); 65 return container_of(enc_to_intel_encoder(encoder), struct intel_dp, base);
66}
66 67
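Annotation: the refactor folds struct intel_dp_priv into a struct intel_dp that embeds its intel_encoder as a named base member, so enc_to_intel_dp() is just container_of() on that member and the separate dev_priv back-pointer disappears. A self-contained demo of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int type; };

struct dp {
	struct encoder base;    /* embedded, so &dp->base round-trips */
	int lane_count;
};

static struct dp *enc_to_dp(struct encoder *enc)
{
	return container_of(enc, struct dp, base);
}

int main(void)
{
	struct dp dp = { .base = { .type = 7 }, .lane_count = 4 };
	struct encoder *enc = &dp.base;   /* what the callbacks receive */

	printf("lanes: %d\n", enc_to_dp(enc)->lane_count);   /* prints 4 */
	return 0;
}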
67static void 68static void intel_dp_link_train(struct intel_dp *intel_dp);
68intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP); 69static void intel_dp_link_down(struct intel_dp *intel_dp);
69 70
70void 71void
71intel_edp_link_config (struct intel_encoder *intel_encoder, 72intel_edp_link_config (struct intel_encoder *intel_encoder,
72 int *lane_num, int *link_bw) 73 int *lane_num, int *link_bw)
73{ 74{
74 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 75 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
75 76
76 *lane_num = dp_priv->lane_count; 77 *lane_num = intel_dp->lane_count;
77 if (dp_priv->link_bw == DP_LINK_BW_1_62) 78 if (intel_dp->link_bw == DP_LINK_BW_1_62)
78 *link_bw = 162000; 79 *link_bw = 162000;
79 else if (dp_priv->link_bw == DP_LINK_BW_2_7) 80 else if (intel_dp->link_bw == DP_LINK_BW_2_7)
80 *link_bw = 270000; 81 *link_bw = 270000;
81} 82}
82 83
83static int 84static int
84intel_dp_max_lane_count(struct intel_encoder *intel_encoder) 85intel_dp_max_lane_count(struct intel_dp *intel_dp)
85{ 86{
86 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
87 int max_lane_count = 4; 87 int max_lane_count = 4;
88 88
89 if (dp_priv->dpcd[0] >= 0x11) { 89 if (intel_dp->dpcd[0] >= 0x11) {
90 max_lane_count = dp_priv->dpcd[2] & 0x1f; 90 max_lane_count = intel_dp->dpcd[2] & 0x1f;
91 switch (max_lane_count) { 91 switch (max_lane_count) {
92 case 1: case 2: case 4: 92 case 1: case 2: case 4:
93 break; 93 break;
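Annotation: intel_dp_max_lane_count() trusts the sink's cached DPCD only once byte 0 (receiver revision) reports at least 0x11 (DP 1.1); byte 2's low five bits then give the lane count, and anything other than 1, 2 or 4 falls back to the default. A standalone version of that parse:

#include <stdint.h>
#include <stdio.h>

/* dpcd[0] = DPCD revision, dpcd[2] = MAX_LANE_COUNT in the low 5 bits,
 * mirroring the four bytes the driver caches from the sink. */
static int max_lane_count(const uint8_t dpcd[4])
{
	int lanes = 4;                    /* default for pre-1.1 receivers */

	if (dpcd[0] >= 0x11) {
		lanes = dpcd[2] & 0x1f;
		switch (lanes) {
		case 1: case 2: case 4:
			break;
		default:
			lanes = 4;        /* reject out-of-spec values */
		}
	}
	return lanes;
}

int main(void)
{
	uint8_t dpcd[4] = { 0x11, 0x0a, 0x82, 0x00 };   /* sample sink: 2 lanes */

	printf("max lanes: %d\n", max_lane_count(dpcd));
	return 0;
}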
@@ -99,10 +99,9 @@ intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
99} 99}
100 100
101static int 101static int
102intel_dp_max_link_bw(struct intel_encoder *intel_encoder) 102intel_dp_max_link_bw(struct intel_dp *intel_dp)
103{ 103{
104 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 104 int max_link_bw = intel_dp->dpcd[1];
105 int max_link_bw = dp_priv->dpcd[1];
106 105
107 switch (max_link_bw) { 106 switch (max_link_bw) {
108 case DP_LINK_BW_1_62: 107 case DP_LINK_BW_1_62:
@@ -126,13 +125,11 @@ intel_dp_link_clock(uint8_t link_bw)
126 125
127/* I think this is a fiction */ 126/* I think this is a fiction */
128static int 127static int
129intel_dp_link_required(struct drm_device *dev, 128intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
130 struct intel_encoder *intel_encoder, int pixel_clock)
131{ 129{
132 struct drm_i915_private *dev_priv = dev->dev_private; 130 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
134 131
135 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) 132 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
136 return (pixel_clock * dev_priv->edp_bpp) / 8; 133 return (pixel_clock * dev_priv->edp_bpp) / 8;
137 else 134 else
138 return pixel_clock * 3; 135 return pixel_clock * 3;
@@ -149,14 +146,13 @@ intel_dp_mode_valid(struct drm_connector *connector,
149 struct drm_display_mode *mode) 146 struct drm_display_mode *mode)
150{ 147{
151 struct drm_encoder *encoder = intel_attached_encoder(connector); 148 struct drm_encoder *encoder = intel_attached_encoder(connector);
152 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 149 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
153 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
154 struct drm_device *dev = connector->dev; 150 struct drm_device *dev = connector->dev;
155 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_private *dev_priv = dev->dev_private;
156 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 152 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
157 int max_lanes = intel_dp_max_lane_count(intel_encoder); 153 int max_lanes = intel_dp_max_lane_count(intel_dp);
158 154
159 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && 155 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
160 dev_priv->panel_fixed_mode) { 156 dev_priv->panel_fixed_mode) {
161 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) 157 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
162 return MODE_PANEL; 158 return MODE_PANEL;
@@ -167,8 +163,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
167 163
168 /* only refuse the mode on non eDP since we have seen some wierd eDP panels 164 /* only refuse the mode on non eDP since we have seen some wierd eDP panels
169 which are outside spec tolerances but somehow work by magic */ 165 which are outside spec tolerances but somehow work by magic */
170 if (!IS_eDP(intel_encoder) && 166 if (!IS_eDP(intel_dp) &&
171 (intel_dp_link_required(connector->dev, intel_encoder, mode->clock) 167 (intel_dp_link_required(connector->dev, intel_dp, mode->clock)
172 > intel_dp_max_data_rate(max_link_clock, max_lanes))) 168 > intel_dp_max_data_rate(max_link_clock, max_lanes)))
173 return MODE_CLOCK_HIGH; 169 return MODE_CLOCK_HIGH;
174 170
@@ -232,19 +228,17 @@ intel_hrawclk(struct drm_device *dev)
232} 228}
233 229
234static int 230static int
235intel_dp_aux_ch(struct intel_encoder *intel_encoder, 231intel_dp_aux_ch(struct intel_dp *intel_dp,
236 uint8_t *send, int send_bytes, 232 uint8_t *send, int send_bytes,
237 uint8_t *recv, int recv_size) 233 uint8_t *recv, int recv_size)
238{ 234{
239 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 235 uint32_t output_reg = intel_dp->output_reg;
240 uint32_t output_reg = dp_priv->output_reg; 236 struct drm_device *dev = intel_dp->base.enc.dev;
241 struct drm_device *dev = intel_encoder->enc.dev;
242 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
243 uint32_t ch_ctl = output_reg + 0x10; 238 uint32_t ch_ctl = output_reg + 0x10;
244 uint32_t ch_data = ch_ctl + 4; 239 uint32_t ch_data = ch_ctl + 4;
245 int i; 240 int i;
246 int recv_bytes; 241 int recv_bytes;
247 uint32_t ctl;
248 uint32_t status; 242 uint32_t status;
249 uint32_t aux_clock_divider; 243 uint32_t aux_clock_divider;
250 int try, precharge; 244 int try, precharge;
@@ -253,7 +247,7 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
253 * and would like to run at 2MHz. So, take the 247 * and would like to run at 2MHz. So, take the
254 * hrawclk value and divide by 2 and use that 248 * hrawclk value and divide by 2 and use that
255 */ 249 */
256 if (IS_eDP(intel_encoder)) { 250 if (IS_eDP(intel_dp)) {
257 if (IS_GEN6(dev)) 251 if (IS_GEN6(dev))
258 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ 252 aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
259 else 253 else
@@ -268,41 +262,43 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
268 else 262 else
269 precharge = 5; 263 precharge = 5;
270 264
265 if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
266 DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
267 I915_READ(ch_ctl));
268 return -EBUSY;
269 }
270
271 /* Must try at least 3 times according to DP spec */ 271 /* Must try at least 3 times according to DP spec */
272 for (try = 0; try < 5; try++) { 272 for (try = 0; try < 5; try++) {
273 /* Load the send data into the aux channel data registers */ 273 /* Load the send data into the aux channel data registers */
274 for (i = 0; i < send_bytes; i += 4) { 274 for (i = 0; i < send_bytes; i += 4)
275 uint32_t d = pack_aux(send + i, send_bytes - i); 275 I915_WRITE(ch_data + i,
276 276 pack_aux(send + i, send_bytes - i));
277 I915_WRITE(ch_data + i, d);
278 }
279
280 ctl = (DP_AUX_CH_CTL_SEND_BUSY |
281 DP_AUX_CH_CTL_TIME_OUT_400us |
282 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
283 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
284 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
285 DP_AUX_CH_CTL_DONE |
286 DP_AUX_CH_CTL_TIME_OUT_ERROR |
287 DP_AUX_CH_CTL_RECEIVE_ERROR);
288 277
289 /* Send the command and wait for it to complete */ 278 /* Send the command and wait for it to complete */
290 I915_WRITE(ch_ctl, ctl); 279 I915_WRITE(ch_ctl,
291 (void) I915_READ(ch_ctl); 280 DP_AUX_CH_CTL_SEND_BUSY |
281 DP_AUX_CH_CTL_TIME_OUT_400us |
282 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
283 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
284 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
285 DP_AUX_CH_CTL_DONE |
286 DP_AUX_CH_CTL_TIME_OUT_ERROR |
287 DP_AUX_CH_CTL_RECEIVE_ERROR);
292 for (;;) { 288 for (;;) {
293 udelay(100);
294 status = I915_READ(ch_ctl); 289 status = I915_READ(ch_ctl);
295 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 290 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
296 break; 291 break;
292 udelay(100);
297 } 293 }
298 294
299 /* Clear done status and any errors */ 295 /* Clear done status and any errors */
300 I915_WRITE(ch_ctl, (status | 296 I915_WRITE(ch_ctl,
301 DP_AUX_CH_CTL_DONE | 297 status |
302 DP_AUX_CH_CTL_TIME_OUT_ERROR | 298 DP_AUX_CH_CTL_DONE |
303 DP_AUX_CH_CTL_RECEIVE_ERROR)); 299 DP_AUX_CH_CTL_TIME_OUT_ERROR |
304 (void) I915_READ(ch_ctl); 300 DP_AUX_CH_CTL_RECEIVE_ERROR);
305 if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) 301 if (status & DP_AUX_CH_CTL_DONE)
306 break; 302 break;
307 } 303 }
308 304
@@ -329,22 +325,19 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder,
329 /* Unload any bytes sent back from the other side */ 325 /* Unload any bytes sent back from the other side */
330 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> 326 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
331 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); 327 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
332
333 if (recv_bytes > recv_size) 328 if (recv_bytes > recv_size)
334 recv_bytes = recv_size; 329 recv_bytes = recv_size;
335 330
336 for (i = 0; i < recv_bytes; i += 4) { 331 for (i = 0; i < recv_bytes; i += 4)
337 uint32_t d = I915_READ(ch_data + i); 332 unpack_aux(I915_READ(ch_data + i),
338 333 recv + i, recv_bytes - i);
339 unpack_aux(d, recv + i, recv_bytes - i);
340 }
341 334
342 return recv_bytes; 335 return recv_bytes;
343} 336}
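Annotation: the tightened AUX loop moves whole message chunks through 32-bit data registers, packing up to four bytes per dword with pack_aux() and reversing it with unpack_aux() on receive. The real helpers live earlier in intel_dp.c; a most-significant-byte-first packing consistent with their usage here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack up to 4 bytes, MSB first, into one AUX data dword. */
static uint32_t pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (int i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

/* Unpack up to 4 bytes back out of one dword. */
static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (int i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

int main(void)
{
	uint8_t msg[3] = { 0x09, 0x00, 0x1f }, out[3];

	unpack_aux(pack_aux(msg, 3), out, 3);
	printf("round-trip %s\n", memcmp(msg, out, 3) ? "failed" : "ok");
	return 0;
}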
344 337
345/* Write data to the aux channel in native mode */ 338/* Write data to the aux channel in native mode */
346static int 339static int
347intel_dp_aux_native_write(struct intel_encoder *intel_encoder, 340intel_dp_aux_native_write(struct intel_dp *intel_dp,
348 uint16_t address, uint8_t *send, int send_bytes) 341 uint16_t address, uint8_t *send, int send_bytes)
349{ 342{
350 int ret; 343 int ret;
@@ -361,7 +354,7 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
361 memcpy(&msg[4], send, send_bytes); 354 memcpy(&msg[4], send, send_bytes);
362 msg_bytes = send_bytes + 4; 355 msg_bytes = send_bytes + 4;
363 for (;;) { 356 for (;;) {
364 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1); 357 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
365 if (ret < 0) 358 if (ret < 0)
366 return ret; 359 return ret;
367 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 360 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -376,15 +369,15 @@ intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
376 369
377/* Write a single byte to the aux channel in native mode */ 370/* Write a single byte to the aux channel in native mode */
378static int 371static int
379intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder, 372intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
380 uint16_t address, uint8_t byte) 373 uint16_t address, uint8_t byte)
381{ 374{
382 return intel_dp_aux_native_write(intel_encoder, address, &byte, 1); 375 return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
383} 376}
384 377
385/* read bytes from a native aux channel */ 378/* read bytes from a native aux channel */
386static int 379static int
387intel_dp_aux_native_read(struct intel_encoder *intel_encoder, 380intel_dp_aux_native_read(struct intel_dp *intel_dp,
388 uint16_t address, uint8_t *recv, int recv_bytes) 381 uint16_t address, uint8_t *recv, int recv_bytes)
389{ 382{
390 uint8_t msg[4]; 383 uint8_t msg[4];
@@ -403,7 +396,7 @@ intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
403 reply_bytes = recv_bytes + 1; 396 reply_bytes = recv_bytes + 1;
404 397
405 for (;;) { 398 for (;;) {
406 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, 399 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
407 reply, reply_bytes); 400 reply, reply_bytes);
408 if (ret == 0) 401 if (ret == 0)
409 return -EPROTO; 402 return -EPROTO;
@@ -426,10 +419,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
426 uint8_t write_byte, uint8_t *read_byte) 419 uint8_t write_byte, uint8_t *read_byte)
427{ 420{
428 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 421 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
429 struct intel_dp_priv *dp_priv = container_of(adapter, 422 struct intel_dp *intel_dp = container_of(adapter,
430 struct intel_dp_priv, 423 struct intel_dp,
431 adapter); 424 adapter);
432 struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
433 uint16_t address = algo_data->address; 425 uint16_t address = algo_data->address;
434 uint8_t msg[5]; 426 uint8_t msg[5];
435 uint8_t reply[2]; 427 uint8_t reply[2];
@@ -468,7 +460,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
468 } 460 }
469 461
470 for (;;) { 462 for (;;) {
471 ret = intel_dp_aux_ch(intel_encoder, 463 ret = intel_dp_aux_ch(intel_dp,
472 msg, msg_bytes, 464 msg, msg_bytes,
473 reply, reply_bytes); 465 reply, reply_bytes);
474 if (ret < 0) { 466 if (ret < 0) {
@@ -496,57 +488,42 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
496} 488}
497 489
498static int 490static int
499intel_dp_i2c_init(struct intel_encoder *intel_encoder, 491intel_dp_i2c_init(struct intel_dp *intel_dp,
500 struct intel_connector *intel_connector, const char *name) 492 struct intel_connector *intel_connector, const char *name)
501{ 493{
502 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
503
504 DRM_DEBUG_KMS("i2c_init %s\n", name); 494 DRM_DEBUG_KMS("i2c_init %s\n", name);
505 dp_priv->algo.running = false; 495 intel_dp->algo.running = false;
506 dp_priv->algo.address = 0; 496 intel_dp->algo.address = 0;
507 dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; 497 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
508 498
509 memset(&dp_priv->adapter, '\0', sizeof (dp_priv->adapter)); 499 memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
510 dp_priv->adapter.owner = THIS_MODULE; 500 intel_dp->adapter.owner = THIS_MODULE;
511 dp_priv->adapter.class = I2C_CLASS_DDC; 501 intel_dp->adapter.class = I2C_CLASS_DDC;
512 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 502 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
513 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 503 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
514 dp_priv->adapter.algo_data = &dp_priv->algo; 504 intel_dp->adapter.algo_data = &intel_dp->algo;
515 dp_priv->adapter.dev.parent = &intel_connector->base.kdev; 505 intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
516 506
517 return i2c_dp_aux_add_bus(&dp_priv->adapter); 507 return i2c_dp_aux_add_bus(&intel_dp->adapter);
518} 508}
519 509
520static bool 510static bool
521intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, 511intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
522 struct drm_display_mode *adjusted_mode) 512 struct drm_display_mode *adjusted_mode)
523{ 513{
524 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
525 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
526 struct drm_device *dev = encoder->dev; 514 struct drm_device *dev = encoder->dev;
527 struct drm_i915_private *dev_priv = dev->dev_private; 515 struct drm_i915_private *dev_priv = dev->dev_private;
516 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
528 int lane_count, clock; 517 int lane_count, clock;
529 int max_lane_count = intel_dp_max_lane_count(intel_encoder); 518 int max_lane_count = intel_dp_max_lane_count(intel_dp);
530 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; 519 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
531 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 520 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
532 521
533 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && 522 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
534 dev_priv->panel_fixed_mode) { 523 dev_priv->panel_fixed_mode) {
535 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; 524 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
536 525 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
537 adjusted_mode->hdisplay = fixed_mode->hdisplay; 526 mode, adjusted_mode);
538 adjusted_mode->hsync_start = fixed_mode->hsync_start;
539 adjusted_mode->hsync_end = fixed_mode->hsync_end;
540 adjusted_mode->htotal = fixed_mode->htotal;
541
542 adjusted_mode->vdisplay = fixed_mode->vdisplay;
543 adjusted_mode->vsync_start = fixed_mode->vsync_start;
544 adjusted_mode->vsync_end = fixed_mode->vsync_end;
545 adjusted_mode->vtotal = fixed_mode->vtotal;
546
547 adjusted_mode->clock = fixed_mode->clock;
548 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
549
550 /* 527 /*
551 * the mode->clock is used to calculate the Data&Link M/N 528 * the mode->clock is used to calculate the Data&Link M/N
552 * of the pipe. For the eDP the fixed clock should be used. 529 * of the pipe. For the eDP the fixed clock should be used.
@@ -558,31 +535,33 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
558 for (clock = 0; clock <= max_clock; clock++) { 535 for (clock = 0; clock <= max_clock; clock++) {
559 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 536 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
560 537
561 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock) 538 if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
562 <= link_avail) { 539 <= link_avail) {
563 dp_priv->link_bw = bws[clock]; 540 intel_dp->link_bw = bws[clock];
564 dp_priv->lane_count = lane_count; 541 intel_dp->lane_count = lane_count;
565 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 542 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
566 DRM_DEBUG_KMS("Display port link bw %02x lane " 543 DRM_DEBUG_KMS("Display port link bw %02x lane "
567 "count %d clock %d\n", 544 "count %d clock %d\n",
568 dp_priv->link_bw, dp_priv->lane_count, 545 intel_dp->link_bw, intel_dp->lane_count,
569 adjusted_mode->clock); 546 adjusted_mode->clock);
570 return true; 547 return true;
571 } 548 }
572 } 549 }
573 } 550 }
574 551
575 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { 552 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
576 /* okay we failed just pick the highest */ 553 /* okay we failed just pick the highest */
577 dp_priv->lane_count = max_lane_count; 554 intel_dp->lane_count = max_lane_count;
578 dp_priv->link_bw = bws[max_clock]; 555 intel_dp->link_bw = bws[max_clock];
579 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 556 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
580 DRM_DEBUG_KMS("Force picking display port link bw %02x lane " 557 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
581 "count %d clock %d\n", 558 "count %d clock %d\n",
582 dp_priv->link_bw, dp_priv->lane_count, 559 intel_dp->link_bw, intel_dp->lane_count,
583 adjusted_mode->clock); 560 adjusted_mode->clock);
561
584 return true; 562 return true;
585 } 563 }
564
586 return false; 565 return false;
587} 566}
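Annotation: mode_fixup searches the link configurations from cheapest up, iterating lane counts {1, 2, 4} in an outer loop (elided from this hunk) and the two link rates in the inner loop shown, taking the first pair whose capacity covers the mode. A sketch of that search with the rate math from the surrounding code; the 8b/10b factor reflects intel_dp_max_data_rate() as I understand it, and required bandwidth is the 24 bpp non-eDP case (pixel_clock * 3):

#include <stdio.h>

static int link_clock_khz(int bw)      /* bw index: 0 = 1.62 Gbps, 1 = 2.7 Gbps */
{
	return bw ? 270000 : 162000;
}

/* 8b/10b line coding leaves 8/10 of the raw symbol rate for data. */
static int max_data_rate(int link_clock, int lanes)
{
	return link_clock * lanes * 8 / 10;
}

/* Cheapest-first search; returns 0 and fills *lanes/*bw on success,
 * -1 if even the widest, fastest link cannot carry the mode. */
static int pick_link_config(int pixel_clock_khz, int max_lanes, int max_bw,
			    int *lanes, int *bw)
{
	int required = pixel_clock_khz * 3;

	for (int l = 1; l <= max_lanes; l <<= 1)
		for (int b = 0; b <= max_bw; b++)
			if (required <= max_data_rate(link_clock_khz(b), l)) {
				*lanes = l;
				*bw = b;
				return 0;
			}
	return -1;
}

int main(void)
{
	int lanes, bw;

	if (pick_link_config(148500, 4, 1, &lanes, &bw) == 0)   /* 1080p clock */
		printf("%d lanes at %s Gbps\n", lanes, bw ? "2.7" : "1.62");
	return 0;
}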
588 567
@@ -626,17 +605,14 @@ bool intel_pch_has_edp(struct drm_crtc *crtc)
626 struct drm_encoder *encoder; 605 struct drm_encoder *encoder;
627 606
628 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 607 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
629 struct intel_encoder *intel_encoder; 608 struct intel_dp *intel_dp;
630 struct intel_dp_priv *dp_priv;
631 609
632 if (!encoder || encoder->crtc != crtc) 610 if (encoder->crtc != crtc)
633 continue; 611 continue;
634 612
635 intel_encoder = enc_to_intel_encoder(encoder); 613 intel_dp = enc_to_intel_dp(encoder);
636 dp_priv = intel_encoder->dev_priv; 614 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
637 615 return intel_dp->is_pch_edp;
638 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
639 return dp_priv->is_pch_edp;
640 } 616 }
641 return false; 617 return false;
642} 618}
@@ -657,18 +633,15 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
657 * Find the lane count in the intel_encoder private 633 * Find the lane count in the intel_encoder private
658 */ 634 */
659 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 635 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
660 struct intel_encoder *intel_encoder; 636 struct intel_dp *intel_dp;
661 struct intel_dp_priv *dp_priv;
662 637
663 if (encoder->crtc != crtc) 638 if (encoder->crtc != crtc)
664 continue; 639 continue;
665 640
666 intel_encoder = enc_to_intel_encoder(encoder); 641 intel_dp = enc_to_intel_dp(encoder);
667 dp_priv = intel_encoder->dev_priv; 642 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
668 643 lane_count = intel_dp->lane_count;
669 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 644 if (IS_PCH_eDP(intel_dp))
670 lane_count = dp_priv->lane_count;
671 if (IS_PCH_eDP(dp_priv))
672 bpp = dev_priv->edp_bpp; 645 bpp = dev_priv->edp_bpp;
673 break; 646 break;
674 } 647 }
@@ -724,107 +697,114 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
724 struct drm_display_mode *adjusted_mode) 697 struct drm_display_mode *adjusted_mode)
725{ 698{
726 struct drm_device *dev = encoder->dev; 699 struct drm_device *dev = encoder->dev;
727 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 700 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
728 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 701 struct drm_crtc *crtc = intel_dp->base.enc.crtc;
729 struct drm_crtc *crtc = intel_encoder->enc.crtc;
730 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
731 703
732 dp_priv->DP = (DP_VOLTAGE_0_4 | 704 intel_dp->DP = (DP_VOLTAGE_0_4 |
733 DP_PRE_EMPHASIS_0); 705 DP_PRE_EMPHASIS_0);
734 706
735 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 707 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
736 dp_priv->DP |= DP_SYNC_HS_HIGH; 708 intel_dp->DP |= DP_SYNC_HS_HIGH;
737 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 709 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
738 dp_priv->DP |= DP_SYNC_VS_HIGH; 710 intel_dp->DP |= DP_SYNC_VS_HIGH;
739 711
740 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) 712 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
741 dp_priv->DP |= DP_LINK_TRAIN_OFF_CPT; 713 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
742 else 714 else
743 dp_priv->DP |= DP_LINK_TRAIN_OFF; 715 intel_dp->DP |= DP_LINK_TRAIN_OFF;
744 716
745 switch (dp_priv->lane_count) { 717 switch (intel_dp->lane_count) {
746 case 1: 718 case 1:
747 dp_priv->DP |= DP_PORT_WIDTH_1; 719 intel_dp->DP |= DP_PORT_WIDTH_1;
748 break; 720 break;
749 case 2: 721 case 2:
750 dp_priv->DP |= DP_PORT_WIDTH_2; 722 intel_dp->DP |= DP_PORT_WIDTH_2;
751 break; 723 break;
752 case 4: 724 case 4:
753 dp_priv->DP |= DP_PORT_WIDTH_4; 725 intel_dp->DP |= DP_PORT_WIDTH_4;
754 break; 726 break;
755 } 727 }
756 if (dp_priv->has_audio) 728 if (intel_dp->has_audio)
757 dp_priv->DP |= DP_AUDIO_OUTPUT_ENABLE; 729 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
758 730
759 memset(dp_priv->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 731 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
760 dp_priv->link_configuration[0] = dp_priv->link_bw; 732 intel_dp->link_configuration[0] = intel_dp->link_bw;
761 dp_priv->link_configuration[1] = dp_priv->lane_count; 733 intel_dp->link_configuration[1] = intel_dp->lane_count;
762 734
763 /* 735 /*
764 * Check for DPCD version > 1.1 and enhanced framing support 736 * Check for DPCD version > 1.1 and enhanced framing support
765 */ 737 */
766 if (dp_priv->dpcd[0] >= 0x11 && (dp_priv->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { 738 if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
767 dp_priv->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 739 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
768 dp_priv->DP |= DP_ENHANCED_FRAMING; 740 intel_dp->DP |= DP_ENHANCED_FRAMING;
769 } 741 }
770 742
771 /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 743 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
772 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) 744 if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
773 dp_priv->DP |= DP_PIPEB_SELECT; 745 intel_dp->DP |= DP_PIPEB_SELECT;
774 746
775 if (IS_eDP(intel_encoder)) { 747 if (IS_eDP(intel_dp)) {
776 /* don't miss out required setting for eDP */ 748 /* don't miss out required setting for eDP */
777 dp_priv->DP |= DP_PLL_ENABLE; 749 intel_dp->DP |= DP_PLL_ENABLE;
778 if (adjusted_mode->clock < 200000) 750 if (adjusted_mode->clock < 200000)
779 dp_priv->DP |= DP_PLL_FREQ_160MHZ; 751 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
780 else 752 else
781 dp_priv->DP |= DP_PLL_FREQ_270MHZ; 753 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
782 } 754 }
783} 755}
784 756
785static void ironlake_edp_panel_on (struct drm_device *dev) 757static void ironlake_edp_panel_on (struct drm_device *dev)
786{ 758{
787 struct drm_i915_private *dev_priv = dev->dev_private; 759 struct drm_i915_private *dev_priv = dev->dev_private;
788 unsigned long timeout = jiffies + msecs_to_jiffies(5000); 760 u32 pp;
789 u32 pp, pp_status;
790 761
791 pp_status = I915_READ(PCH_PP_STATUS); 762 if (I915_READ(PCH_PP_STATUS) & PP_ON)
792 if (pp_status & PP_ON)
793 return; 763 return;
794 764
795 pp = I915_READ(PCH_PP_CONTROL); 765 pp = I915_READ(PCH_PP_CONTROL);
766
767 /* ILK workaround: disable reset around power sequence */
768 pp &= ~PANEL_POWER_RESET;
769 I915_WRITE(PCH_PP_CONTROL, pp);
770 POSTING_READ(PCH_PP_CONTROL);
771
796 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; 772 pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
797 I915_WRITE(PCH_PP_CONTROL, pp); 773 I915_WRITE(PCH_PP_CONTROL, pp);
798 do {
799 pp_status = I915_READ(PCH_PP_STATUS);
800 } while (((pp_status & PP_ON) == 0) && !time_after(jiffies, timeout));
801 774
802 if (time_after(jiffies, timeout)) 775 if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000, 10))
803 DRM_DEBUG_KMS("panel on wait timed out: 0x%08x\n", pp_status); 776 DRM_ERROR("panel on wait timed out: 0x%08x\n",
777 I915_READ(PCH_PP_STATUS));
804 778
805 pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD); 779 pp &= ~(PANEL_UNLOCK_REGS | EDP_FORCE_VDD);
780 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
806 I915_WRITE(PCH_PP_CONTROL, pp); 781 I915_WRITE(PCH_PP_CONTROL, pp);
782 POSTING_READ(PCH_PP_CONTROL);
807} 783}
808 784
809static void ironlake_edp_panel_off (struct drm_device *dev) 785static void ironlake_edp_panel_off (struct drm_device *dev)
810{ 786{
811 struct drm_i915_private *dev_priv = dev->dev_private; 787 struct drm_i915_private *dev_priv = dev->dev_private;
812 unsigned long timeout = jiffies + msecs_to_jiffies(5000); 788 u32 pp;
813 u32 pp, pp_status;
814 789
815 pp = I915_READ(PCH_PP_CONTROL); 790 pp = I915_READ(PCH_PP_CONTROL);
791
792 /* ILK workaround: disable reset around power sequence */
793 pp &= ~PANEL_POWER_RESET;
794 I915_WRITE(PCH_PP_CONTROL, pp);
795 POSTING_READ(PCH_PP_CONTROL);
796
816 pp &= ~POWER_TARGET_ON; 797 pp &= ~POWER_TARGET_ON;
817 I915_WRITE(PCH_PP_CONTROL, pp); 798 I915_WRITE(PCH_PP_CONTROL, pp);
818 do {
819 pp_status = I915_READ(PCH_PP_STATUS);
820 } while ((pp_status & PP_ON) && !time_after(jiffies, timeout));
821 799
822 if (time_after(jiffies, timeout)) 800 if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000, 10))
823 DRM_DEBUG_KMS("panel off wait timed out\n"); 801 DRM_ERROR("panel off wait timed out: 0x%08x\n",
802 I915_READ(PCH_PP_STATUS));
824 803
825 /* Make sure VDD is enabled so DP AUX will work */ 804 /* Make sure VDD is enabled so DP AUX will work */
826 pp |= EDP_FORCE_VDD; 805 pp |= EDP_FORCE_VDD | PANEL_POWER_RESET; /* restore panel reset bit */
827 I915_WRITE(PCH_PP_CONTROL, pp); 806 I915_WRITE(PCH_PP_CONTROL, pp);
807 POSTING_READ(PCH_PP_CONTROL);
828} 808}
829 809
830static void ironlake_edp_backlight_on (struct drm_device *dev) 810static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -849,33 +829,87 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
849 I915_WRITE(PCH_PP_CONTROL, pp); 829 I915_WRITE(PCH_PP_CONTROL, pp);
850} 830}
851 831
832static void ironlake_edp_pll_on(struct drm_encoder *encoder)
833{
834 struct drm_device *dev = encoder->dev;
835 struct drm_i915_private *dev_priv = dev->dev_private;
836 u32 dpa_ctl;
837
838 DRM_DEBUG_KMS("\n");
839 dpa_ctl = I915_READ(DP_A);
840 dpa_ctl &= ~DP_PLL_ENABLE;
841 I915_WRITE(DP_A, dpa_ctl);
842}
843
844static void ironlake_edp_pll_off(struct drm_encoder *encoder)
845{
846 struct drm_device *dev = encoder->dev;
847 struct drm_i915_private *dev_priv = dev->dev_private;
848 u32 dpa_ctl;
849
850 dpa_ctl = I915_READ(DP_A);
851 dpa_ctl |= DP_PLL_ENABLE;
852 I915_WRITE(DP_A, dpa_ctl);
853 udelay(200);
854}
855
856static void intel_dp_prepare(struct drm_encoder *encoder)
857{
858 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
859 struct drm_device *dev = encoder->dev;
860 struct drm_i915_private *dev_priv = dev->dev_private;
861 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
862
863 if (IS_eDP(intel_dp)) {
864 ironlake_edp_backlight_off(dev);
865 ironlake_edp_panel_on(dev);
866 ironlake_edp_pll_on(encoder);
867 }
868 if (dp_reg & DP_PORT_EN)
869 intel_dp_link_down(intel_dp);
870}
871
872static void intel_dp_commit(struct drm_encoder *encoder)
873{
874 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
875 struct drm_device *dev = encoder->dev;
876 struct drm_i915_private *dev_priv = dev->dev_private;
877 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
878
879 if (!(dp_reg & DP_PORT_EN)) {
880 intel_dp_link_train(intel_dp);
881 }
882 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
883 ironlake_edp_backlight_on(dev);
884}
885
852static void 886static void
853intel_dp_dpms(struct drm_encoder *encoder, int mode) 887intel_dp_dpms(struct drm_encoder *encoder, int mode)
854{ 888{
855 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 889 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
856 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
857 struct drm_device *dev = encoder->dev; 890 struct drm_device *dev = encoder->dev;
858 struct drm_i915_private *dev_priv = dev->dev_private; 891 struct drm_i915_private *dev_priv = dev->dev_private;
859 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 892 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
860 893
861 if (mode != DRM_MODE_DPMS_ON) { 894 if (mode != DRM_MODE_DPMS_ON) {
862 if (dp_reg & DP_PORT_EN) { 895 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
863 intel_dp_link_down(intel_encoder, dp_priv->DP); 896 ironlake_edp_backlight_off(dev);
864 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { 897 ironlake_edp_panel_off(dev);
865 ironlake_edp_backlight_off(dev);
866 ironlake_edp_panel_off(dev);
867 }
868 } 898 }
899 if (dp_reg & DP_PORT_EN)
900 intel_dp_link_down(intel_dp);
901 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
902 ironlake_edp_pll_off(encoder);
869 } else { 903 } else {
870 if (!(dp_reg & DP_PORT_EN)) { 904 if (!(dp_reg & DP_PORT_EN)) {
871 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); 905 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
872 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
873 ironlake_edp_panel_on(dev); 906 ironlake_edp_panel_on(dev);
907 intel_dp_link_train(intel_dp);
908 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp))
874 ironlake_edp_backlight_on(dev); 909 ironlake_edp_backlight_on(dev);
875 }
876 } 910 }
877 } 911 }
878 dp_priv->dpms_mode = mode; 912 intel_dp->dpms_mode = mode;
879} 913}
880 914
881/* 915/*
@@ -883,12 +917,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
883 * link status information 917 * link status information
884 */ 918 */
885static bool 919static bool
886intel_dp_get_link_status(struct intel_encoder *intel_encoder, 920intel_dp_get_link_status(struct intel_dp *intel_dp,
887 uint8_t link_status[DP_LINK_STATUS_SIZE]) 921 uint8_t link_status[DP_LINK_STATUS_SIZE])
888{ 922{
889 int ret; 923 int ret;
890 924
891 ret = intel_dp_aux_native_read(intel_encoder, 925 ret = intel_dp_aux_native_read(intel_dp,
892 DP_LANE0_1_STATUS, 926 DP_LANE0_1_STATUS,
893 link_status, DP_LINK_STATUS_SIZE); 927 link_status, DP_LINK_STATUS_SIZE);
894 if (ret != DP_LINK_STATUS_SIZE) 928 if (ret != DP_LINK_STATUS_SIZE)
@@ -965,7 +999,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
965} 999}
966 1000
967static void 1001static void
968intel_get_adjust_train(struct intel_encoder *intel_encoder, 1002intel_get_adjust_train(struct intel_dp *intel_dp,
969 uint8_t link_status[DP_LINK_STATUS_SIZE], 1003 uint8_t link_status[DP_LINK_STATUS_SIZE],
970 int lane_count, 1004 int lane_count,
971 uint8_t train_set[4]) 1005 uint8_t train_set[4])
@@ -1101,27 +1135,27 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1101} 1135}
1102 1136
1103static bool 1137static bool
1104intel_dp_set_link_train(struct intel_encoder *intel_encoder, 1138intel_dp_set_link_train(struct intel_dp *intel_dp,
1105 uint32_t dp_reg_value, 1139 uint32_t dp_reg_value,
1106 uint8_t dp_train_pat, 1140 uint8_t dp_train_pat,
1107 uint8_t train_set[4], 1141 uint8_t train_set[4],
1108 bool first) 1142 bool first)
1109{ 1143{
1110 struct drm_device *dev = intel_encoder->enc.dev; 1144 struct drm_device *dev = intel_dp->base.enc.dev;
1111 struct drm_i915_private *dev_priv = dev->dev_private; 1145 struct drm_i915_private *dev_priv = dev->dev_private;
1112 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1146 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc);
1113 int ret; 1147 int ret;
1114 1148
1115 I915_WRITE(dp_priv->output_reg, dp_reg_value); 1149 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1116 POSTING_READ(dp_priv->output_reg); 1150 POSTING_READ(intel_dp->output_reg);
1117 if (first) 1151 if (first)
1118 intel_wait_for_vblank(dev); 1152 intel_wait_for_vblank(dev, intel_crtc->pipe);
1119 1153
1120 intel_dp_aux_native_write_1(intel_encoder, 1154 intel_dp_aux_native_write_1(intel_dp,
1121 DP_TRAINING_PATTERN_SET, 1155 DP_TRAINING_PATTERN_SET,
1122 dp_train_pat); 1156 dp_train_pat);
1123 1157
1124 ret = intel_dp_aux_native_write(intel_encoder, 1158 ret = intel_dp_aux_native_write(intel_dp,
1125 DP_TRAINING_LANE0_SET, train_set, 4); 1159 DP_TRAINING_LANE0_SET, train_set, 4);
1126 if (ret != 4) 1160 if (ret != 4)
1127 return false; 1161 return false;
@@ -1130,12 +1164,10 @@ intel_dp_set_link_train(struct intel_encoder *intel_encoder,
1130} 1164}
1131 1165
1132static void 1166static void
1133intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP, 1167intel_dp_link_train(struct intel_dp *intel_dp)
1134 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
1135{ 1168{
1136 struct drm_device *dev = intel_encoder->enc.dev; 1169 struct drm_device *dev = intel_dp->base.enc.dev;
1137 struct drm_i915_private *dev_priv = dev->dev_private; 1170 struct drm_i915_private *dev_priv = dev->dev_private;
1138 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1139 uint8_t train_set[4]; 1171 uint8_t train_set[4];
1140 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1172 uint8_t link_status[DP_LINK_STATUS_SIZE];
1141 int i; 1173 int i;
@@ -1145,13 +1177,15 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1145 bool first = true; 1177 bool first = true;
1146 int tries; 1178 int tries;
1147 u32 reg; 1179 u32 reg;
1180 uint32_t DP = intel_dp->DP;
1148 1181
1149 /* Write the link configuration data */ 1182 /* Write the link configuration data */
1150 intel_dp_aux_native_write(intel_encoder, DP_LINK_BW_SET, 1183 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1151 link_configuration, DP_LINK_CONFIGURATION_SIZE); 1184 intel_dp->link_configuration,
1185 DP_LINK_CONFIGURATION_SIZE);
1152 1186
1153 DP |= DP_PORT_EN; 1187 DP |= DP_PORT_EN;
1154 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) 1188 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
1155 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1189 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1156 else 1190 else
1157 DP &= ~DP_LINK_TRAIN_MASK; 1191 DP &= ~DP_LINK_TRAIN_MASK;
@@ -1162,39 +1196,39 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1162 for (;;) { 1196 for (;;) {
1163 /* Use train_set[0] to set the voltage and pre emphasis values */ 1197 /* Use train_set[0] to set the voltage and pre emphasis values */
1164 uint32_t signal_levels; 1198 uint32_t signal_levels;
1165 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { 1199 if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
1166 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1200 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1167 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1201 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1168 } else { 1202 } else {
1169 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1203 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
1170 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1204 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1171 } 1205 }
1172 1206
1173 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) 1207 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
1174 reg = DP | DP_LINK_TRAIN_PAT_1_CPT; 1208 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1175 else 1209 else
1176 reg = DP | DP_LINK_TRAIN_PAT_1; 1210 reg = DP | DP_LINK_TRAIN_PAT_1;
1177 1211
1178 if (!intel_dp_set_link_train(intel_encoder, reg, 1212 if (!intel_dp_set_link_train(intel_dp, reg,
1179 DP_TRAINING_PATTERN_1, train_set, first)) 1213 DP_TRAINING_PATTERN_1, train_set, first))
1180 break; 1214 break;
1181 first = false; 1215 first = false;
1182 /* Set training pattern 1 */ 1216 /* Set training pattern 1 */
1183 1217
1184 udelay(100); 1218 udelay(100);
1185 if (!intel_dp_get_link_status(intel_encoder, link_status)) 1219 if (!intel_dp_get_link_status(intel_dp, link_status))
1186 break; 1220 break;
1187 1221
1188 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { 1222 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1189 clock_recovery = true; 1223 clock_recovery = true;
1190 break; 1224 break;
1191 } 1225 }
1192 1226
1193 /* Check to see if we've tried the max voltage */ 1227 /* Check to see if we've tried the max voltage */
1194 for (i = 0; i < dp_priv->lane_count; i++) 1228 for (i = 0; i < intel_dp->lane_count; i++)
1195 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1229 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1196 break; 1230 break;
1197 if (i == dp_priv->lane_count) 1231 if (i == intel_dp->lane_count)
1198 break; 1232 break;
1199 1233
1200 /* Check to see if we've tried the same voltage 5 times */ 1234 /* Check to see if we've tried the same voltage 5 times */
@@ -1207,7 +1241,7 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1207 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1241 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1208 1242
1209 /* Compute new train_set as requested by target */ 1243 /* Compute new train_set as requested by target */
1210 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); 1244 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
1211 } 1245 }
1212 1246
1213 /* channel equalization */ 1247 /* channel equalization */
@@ -1217,30 +1251,30 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1217 /* Use train_set[0] to set the voltage and pre emphasis values */ 1251 /* Use train_set[0] to set the voltage and pre emphasis values */
1218 uint32_t signal_levels; 1252 uint32_t signal_levels;
1219 1253
1220 if (IS_GEN6(dev) && IS_eDP(intel_encoder)) { 1254 if (IS_GEN6(dev) && IS_eDP(intel_dp)) {
1221 signal_levels = intel_gen6_edp_signal_levels(train_set[0]); 1255 signal_levels = intel_gen6_edp_signal_levels(train_set[0]);
1222 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1256 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1223 } else { 1257 } else {
1224 signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1258 signal_levels = intel_dp_signal_levels(train_set[0], intel_dp->lane_count);
1225 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1259 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1226 } 1260 }
1227 1261
1228 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) 1262 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
1229 reg = DP | DP_LINK_TRAIN_PAT_2_CPT; 1263 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1230 else 1264 else
1231 reg = DP | DP_LINK_TRAIN_PAT_2; 1265 reg = DP | DP_LINK_TRAIN_PAT_2;
1232 1266
1233 /* channel eq pattern */ 1267 /* channel eq pattern */
1234 if (!intel_dp_set_link_train(intel_encoder, reg, 1268 if (!intel_dp_set_link_train(intel_dp, reg,
1235 DP_TRAINING_PATTERN_2, train_set, 1269 DP_TRAINING_PATTERN_2, train_set,
1236 false)) 1270 false))
1237 break; 1271 break;
1238 1272
1239 udelay(400); 1273 udelay(400);
1240 if (!intel_dp_get_link_status(intel_encoder, link_status)) 1274 if (!intel_dp_get_link_status(intel_dp, link_status))
1241 break; 1275 break;
1242 1276
1243 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { 1277 if (intel_channel_eq_ok(link_status, intel_dp->lane_count)) {
1244 channel_eq = true; 1278 channel_eq = true;
1245 break; 1279 break;
1246 } 1280 }
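
DP link training in these hunks runs in two phases: pattern 1 trains clock recovery (every lane must report CR done), pattern 2 trains channel equalization (symbol lock and interlane alignment). After each attempt the sink's DPCD adjust-request fields feed back into the per-lane voltage swing and pre-emphasis in train_set, which is what intel_get_adjust_train() recomputes. A condensed, non-literal sketch of the loop shape; set_pattern(), read_link_status() and adjust_train_set() are placeholders for the driver calls shown above:

        for (;;) {
                set_pattern(DP_TRAINING_PATTERN_1, train_set);  /* placeholder */
                udelay(100);
                if (!read_link_status(link_status))             /* placeholder */
                        break;
                if (clock_recovery_ok(link_status))
                        break;                                  /* phase 1 done */
                adjust_train_set(link_status, train_set);       /* from DPCD */
        }
        /* ...then the same shape again with DP_TRAINING_PATTERN_2 and an
         * equalization check, exactly as in the hunk above. */
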
@@ -1250,53 +1284,53 @@ intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
1250 break; 1284 break;
1251 1285
1252 /* Compute new train_set as requested by target */ 1286 /* Compute new train_set as requested by target */
1253 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set); 1287 intel_get_adjust_train(intel_dp, link_status, intel_dp->lane_count, train_set);
1254 ++tries; 1288 ++tries;
1255 } 1289 }
1256 1290
1257 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) 1291 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp))
1258 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1292 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1259 else 1293 else
1260 reg = DP | DP_LINK_TRAIN_OFF; 1294 reg = DP | DP_LINK_TRAIN_OFF;
1261 1295
1262 I915_WRITE(dp_priv->output_reg, reg); 1296 I915_WRITE(intel_dp->output_reg, reg);
1263 POSTING_READ(dp_priv->output_reg); 1297 POSTING_READ(intel_dp->output_reg);
1264 intel_dp_aux_native_write_1(intel_encoder, 1298 intel_dp_aux_native_write_1(intel_dp,
1265 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1299 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1266} 1300}
1267 1301
1268static void 1302static void
1269intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP) 1303intel_dp_link_down(struct intel_dp *intel_dp)
1270{ 1304{
1271 struct drm_device *dev = intel_encoder->enc.dev; 1305 struct drm_device *dev = intel_dp->base.enc.dev;
1272 struct drm_i915_private *dev_priv = dev->dev_private; 1306 struct drm_i915_private *dev_priv = dev->dev_private;
1273 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1307 uint32_t DP = intel_dp->DP;
1274 1308
1275 DRM_DEBUG_KMS("\n"); 1309 DRM_DEBUG_KMS("\n");
1276 1310
1277 if (IS_eDP(intel_encoder)) { 1311 if (IS_eDP(intel_dp)) {
1278 DP &= ~DP_PLL_ENABLE; 1312 DP &= ~DP_PLL_ENABLE;
1279 I915_WRITE(dp_priv->output_reg, DP); 1313 I915_WRITE(intel_dp->output_reg, DP);
1280 POSTING_READ(dp_priv->output_reg); 1314 POSTING_READ(intel_dp->output_reg);
1281 udelay(100); 1315 udelay(100);
1282 } 1316 }
1283 1317
1284 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_encoder)) { 1318 if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) {
1285 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1319 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1286 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1320 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1287 POSTING_READ(dp_priv->output_reg); 1321 POSTING_READ(intel_dp->output_reg);
1288 } else { 1322 } else {
1289 DP &= ~DP_LINK_TRAIN_MASK; 1323 DP &= ~DP_LINK_TRAIN_MASK;
1290 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 1324 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1291 POSTING_READ(dp_priv->output_reg); 1325 POSTING_READ(intel_dp->output_reg);
1292 } 1326 }
1293 1327
1294 udelay(17000); 1328 udelay(17000);
1295 1329
1296 if (IS_eDP(intel_encoder)) 1330 if (IS_eDP(intel_dp))
1297 DP |= DP_LINK_TRAIN_OFF; 1331 DP |= DP_LINK_TRAIN_OFF;
1298 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); 1332 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1299 POSTING_READ(dp_priv->output_reg); 1333 POSTING_READ(intel_dp->output_reg);
1300} 1334}
1301 1335
1302/* 1336/*
@@ -1309,41 +1343,39 @@ intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1309 */ 1343 */
1310 1344
1311static void 1345static void
1312intel_dp_check_link_status(struct intel_encoder *intel_encoder) 1346intel_dp_check_link_status(struct intel_dp *intel_dp)
1313{ 1347{
1314 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1315 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1348 uint8_t link_status[DP_LINK_STATUS_SIZE];
1316 1349
1317 if (!intel_encoder->enc.crtc) 1350 if (!intel_dp->base.enc.crtc)
1318 return; 1351 return;
1319 1352
1320 if (!intel_dp_get_link_status(intel_encoder, link_status)) { 1353 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1321 intel_dp_link_down(intel_encoder, dp_priv->DP); 1354 intel_dp_link_down(intel_dp);
1322 return; 1355 return;
1323 } 1356 }
1324 1357
1325 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) 1358 if (!intel_channel_eq_ok(link_status, intel_dp->lane_count))
1326 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); 1359 intel_dp_link_train(intel_dp);
1327} 1360}
1328 1361
1329static enum drm_connector_status 1362static enum drm_connector_status
1330ironlake_dp_detect(struct drm_connector *connector) 1363ironlake_dp_detect(struct drm_connector *connector)
1331{ 1364{
1332 struct drm_encoder *encoder = intel_attached_encoder(connector); 1365 struct drm_encoder *encoder = intel_attached_encoder(connector);
1333 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1366 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1334 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1335 enum drm_connector_status status; 1367 enum drm_connector_status status;
1336 1368
1337 status = connector_status_disconnected; 1369 status = connector_status_disconnected;
1338 if (intel_dp_aux_native_read(intel_encoder, 1370 if (intel_dp_aux_native_read(intel_dp,
1339 0x000, dp_priv->dpcd, 1371 0x000, intel_dp->dpcd,
1340 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1372 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
1341 { 1373 {
1342 if (dp_priv->dpcd[0] != 0) 1374 if (intel_dp->dpcd[0] != 0)
1343 status = connector_status_connected; 1375 status = connector_status_connected;
1344 } 1376 }
1345 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", dp_priv->dpcd[0], 1377 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
1346 dp_priv->dpcd[1], dp_priv->dpcd[2], dp_priv->dpcd[3]); 1378 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
1347 return status; 1379 return status;
1348} 1380}
1349 1381
@@ -1357,19 +1389,18 @@ static enum drm_connector_status
1357intel_dp_detect(struct drm_connector *connector) 1389intel_dp_detect(struct drm_connector *connector)
1358{ 1390{
1359 struct drm_encoder *encoder = intel_attached_encoder(connector); 1391 struct drm_encoder *encoder = intel_attached_encoder(connector);
1360 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1392 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1361 struct drm_device *dev = intel_encoder->enc.dev; 1393 struct drm_device *dev = intel_dp->base.enc.dev;
1362 struct drm_i915_private *dev_priv = dev->dev_private; 1394 struct drm_i915_private *dev_priv = dev->dev_private;
1363 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1364 uint32_t temp, bit; 1395 uint32_t temp, bit;
1365 enum drm_connector_status status; 1396 enum drm_connector_status status;
1366 1397
1367 dp_priv->has_audio = false; 1398 intel_dp->has_audio = false;
1368 1399
1369 if (HAS_PCH_SPLIT(dev)) 1400 if (HAS_PCH_SPLIT(dev))
1370 return ironlake_dp_detect(connector); 1401 return ironlake_dp_detect(connector);
1371 1402
1372 switch (dp_priv->output_reg) { 1403 switch (intel_dp->output_reg) {
1373 case DP_B: 1404 case DP_B:
1374 bit = DPB_HOTPLUG_INT_STATUS; 1405 bit = DPB_HOTPLUG_INT_STATUS;
1375 break; 1406 break;
@@ -1389,11 +1420,11 @@ intel_dp_detect(struct drm_connector *connector)
1389 return connector_status_disconnected; 1420 return connector_status_disconnected;
1390 1421
1391 status = connector_status_disconnected; 1422 status = connector_status_disconnected;
1392 if (intel_dp_aux_native_read(intel_encoder, 1423 if (intel_dp_aux_native_read(intel_dp,
1393 0x000, dp_priv->dpcd, 1424 0x000, intel_dp->dpcd,
1394 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1425 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
1395 { 1426 {
1396 if (dp_priv->dpcd[0] != 0) 1427 if (intel_dp->dpcd[0] != 0)
1397 status = connector_status_connected; 1428 status = connector_status_connected;
1398 } 1429 }
1399 return status; 1430 return status;
@@ -1402,18 +1433,17 @@ intel_dp_detect(struct drm_connector *connector)
1402static int intel_dp_get_modes(struct drm_connector *connector) 1433static int intel_dp_get_modes(struct drm_connector *connector)
1403{ 1434{
1404 struct drm_encoder *encoder = intel_attached_encoder(connector); 1435 struct drm_encoder *encoder = intel_attached_encoder(connector);
1405 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1436 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1406 struct drm_device *dev = intel_encoder->enc.dev; 1437 struct drm_device *dev = intel_dp->base.enc.dev;
1407 struct drm_i915_private *dev_priv = dev->dev_private; 1438 struct drm_i915_private *dev_priv = dev->dev_private;
1408 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1409 int ret; 1439 int ret;
1410 1440
1411 /* We should parse the EDID data and find out if it has an audio sink 1441 /* We should parse the EDID data and find out if it has an audio sink
1412 */ 1442 */
1413 1443
1414 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 1444 ret = intel_ddc_get_modes(connector, intel_dp->base.ddc_bus);
1415 if (ret) { 1445 if (ret) {
1416 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && 1446 if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) &&
1417 !dev_priv->panel_fixed_mode) { 1447 !dev_priv->panel_fixed_mode) {
1418 struct drm_display_mode *newmode; 1448 struct drm_display_mode *newmode;
1419 list_for_each_entry(newmode, &connector->probed_modes, 1449 list_for_each_entry(newmode, &connector->probed_modes,
@@ -1430,7 +1460,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1430 } 1460 }
1431 1461
1432 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1462 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1433 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { 1463 if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) {
1434 if (dev_priv->panel_fixed_mode != NULL) { 1464 if (dev_priv->panel_fixed_mode != NULL) {
1435 struct drm_display_mode *mode; 1465 struct drm_display_mode *mode;
1436 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1466 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1452,9 +1482,9 @@ intel_dp_destroy (struct drm_connector *connector)
1452static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1482static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
1453 .dpms = intel_dp_dpms, 1483 .dpms = intel_dp_dpms,
1454 .mode_fixup = intel_dp_mode_fixup, 1484 .mode_fixup = intel_dp_mode_fixup,
1455 .prepare = intel_encoder_prepare, 1485 .prepare = intel_dp_prepare,
1456 .mode_set = intel_dp_mode_set, 1486 .mode_set = intel_dp_mode_set,
1457 .commit = intel_encoder_commit, 1487 .commit = intel_dp_commit,
1458}; 1488};
1459 1489
1460static const struct drm_connector_funcs intel_dp_connector_funcs = { 1490static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -1470,27 +1500,17 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
1470 .best_encoder = intel_attached_encoder, 1500 .best_encoder = intel_attached_encoder,
1471}; 1501};
1472 1502
1473static void intel_dp_enc_destroy(struct drm_encoder *encoder)
1474{
1475 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1476
1477 if (intel_encoder->i2c_bus)
1478 intel_i2c_destroy(intel_encoder->i2c_bus);
1479 drm_encoder_cleanup(encoder);
1480 kfree(intel_encoder);
1481}
1482
1483static const struct drm_encoder_funcs intel_dp_enc_funcs = { 1503static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1484 .destroy = intel_dp_enc_destroy, 1504 .destroy = intel_encoder_destroy,
1485}; 1505};
1486 1506
1487void 1507void
1488intel_dp_hot_plug(struct intel_encoder *intel_encoder) 1508intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1489{ 1509{
1490 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1510 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
1491 1511
1492 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1512 if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
1493 intel_dp_check_link_status(intel_encoder); 1513 intel_dp_check_link_status(intel_dp);
1494} 1514}
1495 1515
1496/* Return which DP Port should be selected for Transcoder DP control */ 1516/* Return which DP Port should be selected for Transcoder DP control */
@@ -1500,18 +1520,18 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
1500 struct drm_device *dev = crtc->dev; 1520 struct drm_device *dev = crtc->dev;
1501 struct drm_mode_config *mode_config = &dev->mode_config; 1521 struct drm_mode_config *mode_config = &dev->mode_config;
1502 struct drm_encoder *encoder; 1522 struct drm_encoder *encoder;
1503 struct intel_encoder *intel_encoder = NULL;
1504 1523
1505 list_for_each_entry(encoder, &mode_config->encoder_list, head) { 1524 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
1525 struct intel_dp *intel_dp;
1526
1506 if (encoder->crtc != crtc) 1527 if (encoder->crtc != crtc)
1507 continue; 1528 continue;
1508 1529
1509 intel_encoder = enc_to_intel_encoder(encoder); 1530 intel_dp = enc_to_intel_dp(encoder);
1510 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 1531 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
1511 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 1532 return intel_dp->output_reg;
1512 return dp_priv->output_reg;
1513 }
1514 } 1533 }
1534
1515 return -1; 1535 return -1;
1516} 1536}
1517 1537
@@ -1540,30 +1560,28 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1540{ 1560{
1541 struct drm_i915_private *dev_priv = dev->dev_private; 1561 struct drm_i915_private *dev_priv = dev->dev_private;
1542 struct drm_connector *connector; 1562 struct drm_connector *connector;
1563 struct intel_dp *intel_dp;
1543 struct intel_encoder *intel_encoder; 1564 struct intel_encoder *intel_encoder;
1544 struct intel_connector *intel_connector; 1565 struct intel_connector *intel_connector;
1545 struct intel_dp_priv *dp_priv;
1546 const char *name = NULL; 1566 const char *name = NULL;
1547 int type; 1567 int type;
1548 1568
1549 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 1569 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
1550 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1570 if (!intel_dp)
1551 if (!intel_encoder)
1552 return; 1571 return;
1553 1572
1554 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1573 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1555 if (!intel_connector) { 1574 if (!intel_connector) {
1556 kfree(intel_encoder); 1575 kfree(intel_dp);
1557 return; 1576 return;
1558 } 1577 }
1578 intel_encoder = &intel_dp->base;
1559 1579
1560 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); 1580 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
1561
1562 if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
1563 if (intel_dpd_is_edp(dev)) 1581 if (intel_dpd_is_edp(dev))
1564 dp_priv->is_pch_edp = true; 1582 intel_dp->is_pch_edp = true;
1565 1583
1566 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { 1584 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
1567 type = DRM_MODE_CONNECTOR_eDP; 1585 type = DRM_MODE_CONNECTOR_eDP;
1568 intel_encoder->type = INTEL_OUTPUT_EDP; 1586 intel_encoder->type = INTEL_OUTPUT_EDP;
1569 } else { 1587 } else {
@@ -1584,18 +1602,16 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1584 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1602 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1585 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1603 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1586 1604
1587 if (IS_eDP(intel_encoder)) 1605 if (IS_eDP(intel_dp))
1588 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1606 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1589 1607
1590 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1608 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1591 connector->interlace_allowed = true; 1609 connector->interlace_allowed = true;
1592 connector->doublescan_allowed = 0; 1610 connector->doublescan_allowed = 0;
1593 1611
1594 dp_priv->intel_encoder = intel_encoder; 1612 intel_dp->output_reg = output_reg;
1595 dp_priv->output_reg = output_reg; 1613 intel_dp->has_audio = false;
1596 dp_priv->has_audio = false; 1614 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1597 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1598 intel_encoder->dev_priv = dp_priv;
1599 1615
1600 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs, 1616 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
1601 DRM_MODE_ENCODER_TMDS); 1617 DRM_MODE_ENCODER_TMDS);
@@ -1630,12 +1646,12 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1630 break; 1646 break;
1631 } 1647 }
1632 1648
1633 intel_dp_i2c_init(intel_encoder, intel_connector, name); 1649 intel_dp_i2c_init(intel_dp, intel_connector, name);
1634 1650
1635 intel_encoder->ddc_bus = &dp_priv->adapter; 1651 intel_encoder->ddc_bus = &intel_dp->adapter;
1636 intel_encoder->hot_plug = intel_dp_hot_plug; 1652 intel_encoder->hot_plug = intel_dp_hot_plug;
1637 1653
1638 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { 1654 if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) {
1639 /* initialize panel mode from VBT if available for eDP */ 1655 /* initialize panel mode from VBT if available for eDP */
1640 if (dev_priv->lfp_lvds_vbt_mode) { 1656 if (dev_priv->lfp_lvds_vbt_mode) {
1641 dev_priv->panel_fixed_mode = 1657 dev_priv->panel_fixed_mode =
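
These intel_dp.c hunks complete the move from the old void *dev_priv side-allocation to conventional subclassing: struct intel_dp embeds struct intel_encoder as its first member ("base"), and enc_to_intel_dp() recovers the containing object with container_of(). Neither definition appears in this excerpt; a minimal sketch of the assumed shape, with the field list inferred from the accesses above (types and the dpcd array size are guesses):

        struct intel_dp {
                struct intel_encoder base;      /* embedded, so upcasts are free */
                uint32_t output_reg;
                uint32_t DP;                    /* cached port register value */
                uint8_t lane_count;
                uint8_t dpcd[4];                /* size guessed from the 4-byte debug print */
                bool has_audio;
                bool is_pch_edp;
                int dpms_mode;
                struct i2c_adapter adapter;     /* AUX-channel i2c adapter */
        };

        static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
        {
                return container_of(enc_to_intel_encoder(encoder),
                                    struct intel_dp, base);
        }

This mirrors the enc_to_intel_dvo(), enc_to_intel_hdmi() and enc_to_intel_lvds() helpers introduced in the files below, and it is why intel_dp_hot_plug() can go from its intel_encoder argument to the intel_dp with a single container_of(..., base).
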
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b2190148703a..ad312ca6b3e5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,20 @@
32#include "drm_crtc.h" 32#include "drm_crtc.h"
33 33
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35
36#define wait_for(COND, MS, W) ({ \
37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
38 int ret__ = 0; \
39 while (! (COND)) { \
40 if (time_after(jiffies, timeout__)) { \
41 ret__ = -ETIMEDOUT; \
42 break; \
43 } \
44 if (W) msleep(W); \
45 } \
46 ret__; \
47})
48
35/* 49/*
36 * Display related stuff 50 * Display related stuff
37 */ 51 */
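
The new wait_for() macro gives hardware polling a bounded form: evaluate COND for up to MS milliseconds, optionally sleeping W milliseconds between reads, and yield 0 on success or -ETIMEDOUT instead of spinning forever on wedged hardware (the intel_lvds.c hunks below convert two unbounded do/while polls to it). A minimal usage sketch; STATUS_REG, READY_BIT and BUSY_BIT are placeholders, not real i915 register names:

        /* Sleep-poll: up to 1000 ms, 1 ms between reads. */
        if (wait_for(I915_READ(STATUS_REG) & READY_BIT, 1000, 1))
                DRM_ERROR("timed out waiting for hardware ready\n");

        /* Busy-poll: W == 0 skips the msleep(), for short waits in
         * contexts where sleeping is not an option. */
        if (wait_for((I915_READ(STATUS_REG) & BUSY_BIT) == 0, 50, 0))
                DRM_ERROR("hardware still busy\n");
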
@@ -102,7 +116,6 @@ struct intel_encoder {
102 struct i2c_adapter *ddc_bus; 116 struct i2c_adapter *ddc_bus;
103 bool load_detect_temp; 117 bool load_detect_temp;
104 bool needs_tv_clock; 118 bool needs_tv_clock;
105 void *dev_priv;
106 void (*hot_plug)(struct intel_encoder *); 119 void (*hot_plug)(struct intel_encoder *);
107 int crtc_mask; 120 int crtc_mask;
108 int clone_mask; 121 int clone_mask;
@@ -110,7 +123,6 @@ struct intel_encoder {
110 123
111struct intel_connector { 124struct intel_connector {
112 struct drm_connector base; 125 struct drm_connector base;
113 void *dev_priv;
114}; 126};
115 127
116struct intel_crtc; 128struct intel_crtc;
@@ -156,7 +168,7 @@ struct intel_crtc {
156 uint32_t cursor_addr; 168 uint32_t cursor_addr;
157 int16_t cursor_x, cursor_y; 169 int16_t cursor_x, cursor_y;
158 int16_t cursor_width, cursor_height; 170 int16_t cursor_width, cursor_height;
159 bool cursor_visble; 171 bool cursor_visible, cursor_on;
160}; 172};
161 173
162#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 174#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -164,6 +176,16 @@ struct intel_crtc {
164#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc) 176#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
165#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 177#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
166 178
179struct intel_unpin_work {
180 struct work_struct work;
181 struct drm_device *dev;
182 struct drm_gem_object *old_fb_obj;
183 struct drm_gem_object *pending_flip_obj;
184 struct drm_pending_vblank_event *event;
185 int pending;
186 bool enable_stall_check;
187};
188
167struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 189struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
168 const char *name); 190 const char *name);
169void intel_i2c_destroy(struct i2c_adapter *adapter); 191void intel_i2c_destroy(struct i2c_adapter *adapter);
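
struct intel_unpin_work above is bookkeeping for asynchronous page flips: the old framebuffer's GEM object must stay pinned until the flip actually lands on a vblank, so completion is split between the interrupt handler and a workqueue item that performs the sleeping unpin. A hedged sketch of the assumed life cycle; intel_unpin_work_fn and the surrounding flip code are illustrative, not lines from this commit:

        /* At flip-queue time (e.g. in the crtc's page_flip hook): */
        struct intel_unpin_work *work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;
        work->dev = dev;
        work->event = event;                    /* vblank event to deliver */
        work->old_fb_obj = old_fb_obj;          /* unpin after the flip lands */
        INIT_WORK(&work->work, intel_unpin_work_fn);    /* hypothetical handler */

        /* From the flip-done interrupt: mark the flip finished, send the
         * vblank event, then schedule_work(&work->work) so old_fb_obj is
         * unpinned outside interrupt context. enable_stall_check lets the
         * vblank handler flag flips that never complete. */
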
@@ -188,10 +210,18 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
188extern void intel_edp_link_config (struct intel_encoder *, int *, int *); 210extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
189 211
190 212
213extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
214 struct drm_display_mode *adjusted_mode);
215extern void intel_pch_panel_fitting(struct drm_device *dev,
216 int fitting_mode,
217 struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode);
219
191extern int intel_panel_fitter_pipe (struct drm_device *dev); 220extern int intel_panel_fitter_pipe (struct drm_device *dev);
192extern void intel_crtc_load_lut(struct drm_crtc *crtc); 221extern void intel_crtc_load_lut(struct drm_crtc *crtc);
193extern void intel_encoder_prepare (struct drm_encoder *encoder); 222extern void intel_encoder_prepare (struct drm_encoder *encoder);
194extern void intel_encoder_commit (struct drm_encoder *encoder); 223extern void intel_encoder_commit (struct drm_encoder *encoder);
224extern void intel_encoder_destroy(struct drm_encoder *encoder);
195 225
196extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector); 226extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
197 227
@@ -199,7 +229,8 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
199 struct drm_crtc *crtc); 229 struct drm_crtc *crtc);
200int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 230int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
201 struct drm_file *file_priv); 231 struct drm_file *file_priv);
202extern void intel_wait_for_vblank(struct drm_device *dev); 232extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe);
233extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
203extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 234extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
204extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 235extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
205 struct drm_connector *connector, 236 struct drm_connector *connector,
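
intel_encoder_destroy() centralizes the nearly identical enc_destroy callbacks deleted from intel_dp.c, intel_dvo.c, intel_hdmi.c and intel_lvds.c. Its body lives in a file outside this excerpt; judging from the destructors it replaces, a plausible reconstruction (an assumption, not quoted code):

        void intel_encoder_destroy(struct drm_encoder *encoder)
        {
                struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);

                if (intel_encoder->i2c_bus)
                        intel_i2c_destroy(intel_encoder->i2c_bus);
                if (intel_encoder->ddc_bus)
                        intel_i2c_destroy(intel_encoder->ddc_bus);
                drm_encoder_cleanup(encoder);
                /* Every output type now embeds struct intel_encoder as its
                 * first member, so freeing the base pointer frees the whole
                 * subclass allocation (intel_dp, intel_hdmi, ...). */
                kfree(intel_encoder);
        }
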
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 227feca7cf8d..a399f4b2c1c5 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -38,7 +38,7 @@
38#define CH7xxx_ADDR 0x76 38#define CH7xxx_ADDR 0x76
39#define TFP410_ADDR 0x38 39#define TFP410_ADDR 0x38
40 40
41static struct intel_dvo_device intel_dvo_devices[] = { 41static const struct intel_dvo_device intel_dvo_devices[] = {
42 { 42 {
43 .type = INTEL_DVO_CHIP_TMDS, 43 .type = INTEL_DVO_CHIP_TMDS,
44 .name = "sil164", 44 .name = "sil164",
@@ -77,20 +77,33 @@ static struct intel_dvo_device intel_dvo_devices[] = {
77 } 77 }
78}; 78};
79 79
80struct intel_dvo {
81 struct intel_encoder base;
82
83 struct intel_dvo_device dev;
84
85 struct drm_display_mode *panel_fixed_mode;
86 bool panel_wants_dither;
87};
88
89static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
90{
91 return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
92}
93
80static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 94static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
81{ 95{
82 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 96 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
83 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 97 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
84 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 98 u32 dvo_reg = intel_dvo->dev.dvo_reg;
85 u32 dvo_reg = dvo->dvo_reg;
86 u32 temp = I915_READ(dvo_reg); 99 u32 temp = I915_READ(dvo_reg);
87 100
88 if (mode == DRM_MODE_DPMS_ON) { 101 if (mode == DRM_MODE_DPMS_ON) {
89 I915_WRITE(dvo_reg, temp | DVO_ENABLE); 102 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
90 I915_READ(dvo_reg); 103 I915_READ(dvo_reg);
91 dvo->dev_ops->dpms(dvo, mode); 104 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
92 } else { 105 } else {
93 dvo->dev_ops->dpms(dvo, mode); 106 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
94 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); 107 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
95 I915_READ(dvo_reg); 108 I915_READ(dvo_reg);
96 } 109 }
@@ -100,38 +113,36 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
100 struct drm_display_mode *mode) 113 struct drm_display_mode *mode)
101{ 114{
102 struct drm_encoder *encoder = intel_attached_encoder(connector); 115 struct drm_encoder *encoder = intel_attached_encoder(connector);
103 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 116 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
104 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
105 117
106 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 118 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
107 return MODE_NO_DBLESCAN; 119 return MODE_NO_DBLESCAN;
108 120
109 /* XXX: Validate clock range */ 121 /* XXX: Validate clock range */
110 122
111 if (dvo->panel_fixed_mode) { 123 if (intel_dvo->panel_fixed_mode) {
112 if (mode->hdisplay > dvo->panel_fixed_mode->hdisplay) 124 if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
113 return MODE_PANEL; 125 return MODE_PANEL;
114 if (mode->vdisplay > dvo->panel_fixed_mode->vdisplay) 126 if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
115 return MODE_PANEL; 127 return MODE_PANEL;
116 } 128 }
117 129
118 return dvo->dev_ops->mode_valid(dvo, mode); 130 return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
119} 131}
120 132
121static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, 133static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
122 struct drm_display_mode *mode, 134 struct drm_display_mode *mode,
123 struct drm_display_mode *adjusted_mode) 135 struct drm_display_mode *adjusted_mode)
124{ 136{
125 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 137 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
126 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
127 138
128 /* If we have timings from the BIOS for the panel, put them in 139 /* If we have timings from the BIOS for the panel, put them in
129 * to the adjusted mode. The CRTC will be set up for this mode, 140 * to the adjusted mode. The CRTC will be set up for this mode,
130 * with the panel scaling set up to source from the H/VDisplay 141 * with the panel scaling set up to source from the H/VDisplay
131 * of the original mode. 142 * of the original mode.
132 */ 143 */
133 if (dvo->panel_fixed_mode != NULL) { 144 if (intel_dvo->panel_fixed_mode != NULL) {
134#define C(x) adjusted_mode->x = dvo->panel_fixed_mode->x 145#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
135 C(hdisplay); 146 C(hdisplay);
136 C(hsync_start); 147 C(hsync_start);
137 C(hsync_end); 148 C(hsync_end);
@@ -145,8 +156,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
145#undef C 156#undef C
146 } 157 }
147 158
148 if (dvo->dev_ops->mode_fixup) 159 if (intel_dvo->dev.dev_ops->mode_fixup)
149 return dvo->dev_ops->mode_fixup(dvo, mode, adjusted_mode); 160 return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
150 161
151 return true; 162 return true;
152} 163}
@@ -158,11 +169,10 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
158 struct drm_device *dev = encoder->dev; 169 struct drm_device *dev = encoder->dev;
159 struct drm_i915_private *dev_priv = dev->dev_private; 170 struct drm_i915_private *dev_priv = dev->dev_private;
160 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 171 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
161 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 172 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
162 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
163 int pipe = intel_crtc->pipe; 173 int pipe = intel_crtc->pipe;
164 u32 dvo_val; 174 u32 dvo_val;
165 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; 175 u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
166 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 176 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
167 177
168 switch (dvo_reg) { 178 switch (dvo_reg) {
@@ -178,7 +188,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
178 break; 188 break;
179 } 189 }
180 190
181 dvo->dev_ops->mode_set(dvo, mode, adjusted_mode); 191 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
182 192
183 /* Save the data order, since I don't know what it should be set to. */ 193 /* Save the data order, since I don't know what it should be set to. */
184 dvo_val = I915_READ(dvo_reg) & 194 dvo_val = I915_READ(dvo_reg) &
@@ -214,40 +224,38 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
214static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 224static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
215{ 225{
216 struct drm_encoder *encoder = intel_attached_encoder(connector); 226 struct drm_encoder *encoder = intel_attached_encoder(connector);
217 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 227 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
218 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
219 228
220 return dvo->dev_ops->detect(dvo); 229 return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
221} 230}
222 231
223static int intel_dvo_get_modes(struct drm_connector *connector) 232static int intel_dvo_get_modes(struct drm_connector *connector)
224{ 233{
225 struct drm_encoder *encoder = intel_attached_encoder(connector); 234 struct drm_encoder *encoder = intel_attached_encoder(connector);
226 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 235 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
227 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
228 236
229 /* We should probably have an i2c driver get_modes function for those 237 /* We should probably have an i2c driver get_modes function for those
230 * devices which will have a fixed set of modes determined by the chip 238 * devices which will have a fixed set of modes determined by the chip
231 * (TV-out, for example), but for now with just TMDS and LVDS, 239 * (TV-out, for example), but for now with just TMDS and LVDS,
232 * that's not the case. 240 * that's not the case.
233 */ 241 */
234 intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 242 intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
235 if (!list_empty(&connector->probed_modes)) 243 if (!list_empty(&connector->probed_modes))
236 return 1; 244 return 1;
237 245
238 246 if (intel_dvo->panel_fixed_mode != NULL) {
239 if (dvo->panel_fixed_mode != NULL) {
240 struct drm_display_mode *mode; 247 struct drm_display_mode *mode;
241 mode = drm_mode_duplicate(connector->dev, dvo->panel_fixed_mode); 248 mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
242 if (mode) { 249 if (mode) {
243 drm_mode_probed_add(connector, mode); 250 drm_mode_probed_add(connector, mode);
244 return 1; 251 return 1;
245 } 252 }
246 } 253 }
254
247 return 0; 255 return 0;
248} 256}
249 257
250static void intel_dvo_destroy (struct drm_connector *connector) 258static void intel_dvo_destroy(struct drm_connector *connector)
251{ 259{
252 drm_sysfs_connector_remove(connector); 260 drm_sysfs_connector_remove(connector);
253 drm_connector_cleanup(connector); 261 drm_connector_cleanup(connector);
@@ -277,28 +285,20 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
277 285
278static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 286static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
279{ 287{
280 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 288 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
281 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 289
282 290 if (intel_dvo->dev.dev_ops->destroy)
283 if (dvo) { 291 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
284 if (dvo->dev_ops->destroy) 292
285 dvo->dev_ops->destroy(dvo); 293 kfree(intel_dvo->panel_fixed_mode);
286 if (dvo->panel_fixed_mode) 294
287 kfree(dvo->panel_fixed_mode); 295 intel_encoder_destroy(encoder);
288 }
289 if (intel_encoder->i2c_bus)
290 intel_i2c_destroy(intel_encoder->i2c_bus);
291 if (intel_encoder->ddc_bus)
292 intel_i2c_destroy(intel_encoder->ddc_bus);
293 drm_encoder_cleanup(encoder);
294 kfree(intel_encoder);
295} 296}
296 297
297static const struct drm_encoder_funcs intel_dvo_enc_funcs = { 298static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
298 .destroy = intel_dvo_enc_destroy, 299 .destroy = intel_dvo_enc_destroy,
299}; 300};
300 301
301
302/** 302/**
303 * Attempts to get a fixed panel timing for LVDS (currently only the i830). 303 * Attempts to get a fixed panel timing for LVDS (currently only the i830).
304 * 304 *
@@ -306,15 +306,13 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
306 * chip being on DVOB/C and having multiple pipes. 306 * chip being on DVOB/C and having multiple pipes.
307 */ 307 */
308static struct drm_display_mode * 308static struct drm_display_mode *
309intel_dvo_get_current_mode (struct drm_connector *connector) 309intel_dvo_get_current_mode(struct drm_connector *connector)
310{ 310{
311 struct drm_device *dev = connector->dev; 311 struct drm_device *dev = connector->dev;
312 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
313 struct drm_encoder *encoder = intel_attached_encoder(connector); 313 struct drm_encoder *encoder = intel_attached_encoder(connector);
314 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 314 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
315 struct intel_dvo_device *dvo = intel_encoder->dev_priv; 315 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
316 uint32_t dvo_reg = dvo->dvo_reg;
317 uint32_t dvo_val = I915_READ(dvo_reg);
318 struct drm_display_mode *mode = NULL; 316 struct drm_display_mode *mode = NULL;
319 317
320 /* If the DVO port is active, that'll be the LVDS, so we can pull out 318 /* If the DVO port is active, that'll be the LVDS, so we can pull out
@@ -327,7 +325,6 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
327 crtc = intel_get_crtc_from_pipe(dev, pipe); 325 crtc = intel_get_crtc_from_pipe(dev, pipe);
328 if (crtc) { 326 if (crtc) {
329 mode = intel_crtc_mode_get(dev, crtc); 327 mode = intel_crtc_mode_get(dev, crtc);
330
331 if (mode) { 328 if (mode) {
332 mode->type |= DRM_MODE_TYPE_PREFERRED; 329 mode->type |= DRM_MODE_TYPE_PREFERRED;
333 if (dvo_val & DVO_HSYNC_ACTIVE_HIGH) 330 if (dvo_val & DVO_HSYNC_ACTIVE_HIGH)
@@ -337,28 +334,32 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
337 } 334 }
338 } 335 }
339 } 336 }
337
340 return mode; 338 return mode;
341} 339}
342 340
343void intel_dvo_init(struct drm_device *dev) 341void intel_dvo_init(struct drm_device *dev)
344{ 342{
345 struct intel_encoder *intel_encoder; 343 struct intel_encoder *intel_encoder;
344 struct intel_dvo *intel_dvo;
346 struct intel_connector *intel_connector; 345 struct intel_connector *intel_connector;
347 struct intel_dvo_device *dvo;
348 struct i2c_adapter *i2cbus = NULL; 346 struct i2c_adapter *i2cbus = NULL;
349 int ret = 0; 347 int ret = 0;
350 int i; 348 int i;
351 int encoder_type = DRM_MODE_ENCODER_NONE; 349 int encoder_type = DRM_MODE_ENCODER_NONE;
352 intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL); 350
353 if (!intel_encoder) 351 intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
352 if (!intel_dvo)
354 return; 353 return;
355 354
356 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 355 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
357 if (!intel_connector) { 356 if (!intel_connector) {
358 kfree(intel_encoder); 357 kfree(intel_dvo);
359 return; 358 return;
360 } 359 }
361 360
361 intel_encoder = &intel_dvo->base;
362
362 /* Set up the DDC bus */ 363 /* Set up the DDC bus */
363 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 364 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
364 if (!intel_encoder->ddc_bus) 365 if (!intel_encoder->ddc_bus)
@@ -367,10 +368,9 @@ void intel_dvo_init(struct drm_device *dev)
367 /* Now, try to find a controller */ 368 /* Now, try to find a controller */
368 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 369 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
369 struct drm_connector *connector = &intel_connector->base; 370 struct drm_connector *connector = &intel_connector->base;
371 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
370 int gpio; 372 int gpio;
371 373
372 dvo = &intel_dvo_devices[i];
373
374 /* Allow the I2C driver info to specify the GPIO to be used in 374 /* Allow the I2C driver info to specify the GPIO to be used in
375 * special cases, but otherwise default to what's defined 375 * special cases, but otherwise default to what's defined
376 * in the spec. 376 * in the spec.
@@ -393,11 +393,8 @@ void intel_dvo_init(struct drm_device *dev)
393 continue; 393 continue;
394 } 394 }
395 395
396 if (dvo->dev_ops!= NULL) 396 intel_dvo->dev = *dvo;
397 ret = dvo->dev_ops->init(dvo, i2cbus); 397 ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
398 else
399 ret = false;
400
401 if (!ret) 398 if (!ret)
402 continue; 399 continue;
403 400
@@ -429,9 +426,6 @@ void intel_dvo_init(struct drm_device *dev)
429 connector->interlace_allowed = false; 426 connector->interlace_allowed = false;
430 connector->doublescan_allowed = false; 427 connector->doublescan_allowed = false;
431 428
432 intel_encoder->dev_priv = dvo;
433 intel_encoder->i2c_bus = i2cbus;
434
435 drm_encoder_init(dev, &intel_encoder->enc, 429 drm_encoder_init(dev, &intel_encoder->enc,
436 &intel_dvo_enc_funcs, encoder_type); 430 &intel_dvo_enc_funcs, encoder_type);
437 drm_encoder_helper_add(&intel_encoder->enc, 431 drm_encoder_helper_add(&intel_encoder->enc,
@@ -447,9 +441,9 @@ void intel_dvo_init(struct drm_device *dev)
447 * headers, likely), so for now, just get the current 441 * headers, likely), so for now, just get the current
448 * mode being output through DVO. 442 * mode being output through DVO.
449 */ 443 */
450 dvo->panel_fixed_mode = 444 intel_dvo->panel_fixed_mode =
451 intel_dvo_get_current_mode(connector); 445 intel_dvo_get_current_mode(connector);
452 dvo->panel_wants_dither = true; 446 intel_dvo->panel_wants_dither = true;
453 } 447 }
454 448
455 drm_sysfs_connector_add(connector); 449 drm_sysfs_connector_add(connector);
@@ -461,6 +455,6 @@ void intel_dvo_init(struct drm_device *dev)
461 if (i2cbus != NULL) 455 if (i2cbus != NULL)
462 intel_i2c_destroy(i2cbus); 456 intel_i2c_destroy(i2cbus);
463free_intel: 457free_intel:
464 kfree(intel_encoder); 458 kfree(intel_dvo);
465 kfree(intel_connector); 459 kfree(intel_connector);
466} 460}
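
Making intel_dvo_devices[] const works because probing now copies the matching template into the per-encoder object ("intel_dvo->dev = *dvo;") and hands the driver ops that private copy, so the shared table is never written. It also drops the dev_ops NULL check, since every table entry supplies dev_ops. The new probe-loop shape, condensed:

        for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
                const struct intel_dvo_device *dvo = &intel_dvo_devices[i];

                /* Work on a mutable per-encoder copy; the const table can
                 * live in read-only memory. */
                intel_dvo->dev = *dvo;
                if (!dvo->dev_ops->init(&intel_dvo->dev, i2cbus))
                        continue;
                /* ... controller found; everything else uses intel_dvo->dev ... */
        }
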
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 197887ed1823..ccd4c97e6524 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,11 +37,17 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40struct intel_hdmi_priv { 40struct intel_hdmi {
41 struct intel_encoder base;
41 u32 sdvox_reg; 42 u32 sdvox_reg;
42 bool has_hdmi_sink; 43 bool has_hdmi_sink;
43}; 44};
44 45
46static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
47{
48 return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
49}
50
45static void intel_hdmi_mode_set(struct drm_encoder *encoder, 51static void intel_hdmi_mode_set(struct drm_encoder *encoder,
46 struct drm_display_mode *mode, 52 struct drm_display_mode *mode,
47 struct drm_display_mode *adjusted_mode) 53 struct drm_display_mode *adjusted_mode)
@@ -50,8 +56,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
50 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
51 struct drm_crtc *crtc = encoder->crtc; 57 struct drm_crtc *crtc = encoder->crtc;
52 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 58 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
53 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 59 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
54 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
55 u32 sdvox; 60 u32 sdvox;
56 61
57 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; 62 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
@@ -60,7 +65,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
60 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 65 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
61 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 66 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
62 67
63 if (hdmi_priv->has_hdmi_sink) { 68 if (intel_hdmi->has_hdmi_sink) {
64 sdvox |= SDVO_AUDIO_ENABLE; 69 sdvox |= SDVO_AUDIO_ENABLE;
65 if (HAS_PCH_CPT(dev)) 70 if (HAS_PCH_CPT(dev))
66 sdvox |= HDMI_MODE_SELECT; 71 sdvox |= HDMI_MODE_SELECT;
@@ -73,26 +78,25 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
73 sdvox |= SDVO_PIPE_B_SELECT; 78 sdvox |= SDVO_PIPE_B_SELECT;
74 } 79 }
75 80
76 I915_WRITE(hdmi_priv->sdvox_reg, sdvox); 81 I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
77 POSTING_READ(hdmi_priv->sdvox_reg); 82 POSTING_READ(intel_hdmi->sdvox_reg);
78} 83}
79 84
80static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) 85static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
81{ 86{
82 struct drm_device *dev = encoder->dev; 87 struct drm_device *dev = encoder->dev;
83 struct drm_i915_private *dev_priv = dev->dev_private; 88 struct drm_i915_private *dev_priv = dev->dev_private;
84 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 89 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
85 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
86 u32 temp; 90 u32 temp;
87 91
88 temp = I915_READ(hdmi_priv->sdvox_reg); 92 temp = I915_READ(intel_hdmi->sdvox_reg);
89 93
90 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 94 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
91 * we do this anyway which shows more stable in testing. 95 * we do this anyway which shows more stable in testing.
92 */ 96 */
93 if (HAS_PCH_SPLIT(dev)) { 97 if (HAS_PCH_SPLIT(dev)) {
94 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 98 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
95 POSTING_READ(hdmi_priv->sdvox_reg); 99 POSTING_READ(intel_hdmi->sdvox_reg);
96 } 100 }
97 101
98 if (mode != DRM_MODE_DPMS_ON) { 102 if (mode != DRM_MODE_DPMS_ON) {
@@ -101,15 +105,15 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
101 temp |= SDVO_ENABLE; 105 temp |= SDVO_ENABLE;
102 } 106 }
103 107
104 I915_WRITE(hdmi_priv->sdvox_reg, temp); 108 I915_WRITE(intel_hdmi->sdvox_reg, temp);
105 POSTING_READ(hdmi_priv->sdvox_reg); 109 POSTING_READ(intel_hdmi->sdvox_reg);
106 110
107 /* HW workaround, need to write this twice for issue that may result 111 /* HW workaround, need to write this twice for issue that may result
108 * in first write getting masked. 112 * in first write getting masked.
109 */ 113 */
110 if (HAS_PCH_SPLIT(dev)) { 114 if (HAS_PCH_SPLIT(dev)) {
111 I915_WRITE(hdmi_priv->sdvox_reg, temp); 115 I915_WRITE(intel_hdmi->sdvox_reg, temp);
112 POSTING_READ(hdmi_priv->sdvox_reg); 116 POSTING_READ(intel_hdmi->sdvox_reg);
113 } 117 }
114} 118}
115 119
@@ -138,19 +142,17 @@ static enum drm_connector_status
138intel_hdmi_detect(struct drm_connector *connector) 142intel_hdmi_detect(struct drm_connector *connector)
139{ 143{
140 struct drm_encoder *encoder = intel_attached_encoder(connector); 144 struct drm_encoder *encoder = intel_attached_encoder(connector);
141 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 145 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
142 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
143 struct edid *edid = NULL; 146 struct edid *edid = NULL;
144 enum drm_connector_status status = connector_status_disconnected; 147 enum drm_connector_status status = connector_status_disconnected;
145 148
146 hdmi_priv->has_hdmi_sink = false; 149 intel_hdmi->has_hdmi_sink = false;
147 edid = drm_get_edid(connector, 150 edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
148 intel_encoder->ddc_bus);
149 151
150 if (edid) { 152 if (edid) {
151 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 153 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
152 status = connector_status_connected; 154 status = connector_status_connected;
153 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 155 intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
154 } 156 }
155 connector->display_info.raw_edid = NULL; 157 connector->display_info.raw_edid = NULL;
156 kfree(edid); 158 kfree(edid);
@@ -162,13 +164,13 @@ intel_hdmi_detect(struct drm_connector *connector)
162static int intel_hdmi_get_modes(struct drm_connector *connector) 164static int intel_hdmi_get_modes(struct drm_connector *connector)
163{ 165{
164 struct drm_encoder *encoder = intel_attached_encoder(connector); 166 struct drm_encoder *encoder = intel_attached_encoder(connector);
165 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 167 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
166 168
167 /* We should parse the EDID data and find out if it's an HDMI sink so 169 /* We should parse the EDID data and find out if it's an HDMI sink so
168 * we can send audio to it. 170 * we can send audio to it.
169 */ 171 */
170 172
171 return intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 173 return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
172} 174}
173 175
174static void intel_hdmi_destroy(struct drm_connector *connector) 176static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -199,18 +201,8 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
199 .best_encoder = intel_attached_encoder, 201 .best_encoder = intel_attached_encoder,
200}; 202};
201 203
202static void intel_hdmi_enc_destroy(struct drm_encoder *encoder)
203{
204 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
205
206 if (intel_encoder->i2c_bus)
207 intel_i2c_destroy(intel_encoder->i2c_bus);
208 drm_encoder_cleanup(encoder);
209 kfree(intel_encoder);
210}
211
212static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 204static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
213 .destroy = intel_hdmi_enc_destroy, 205 .destroy = intel_encoder_destroy,
214}; 206};
215 207
216void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 208void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
@@ -219,21 +211,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
219 struct drm_connector *connector; 211 struct drm_connector *connector;
220 struct intel_encoder *intel_encoder; 212 struct intel_encoder *intel_encoder;
221 struct intel_connector *intel_connector; 213 struct intel_connector *intel_connector;
222 struct intel_hdmi_priv *hdmi_priv; 214 struct intel_hdmi *intel_hdmi;
223 215
224 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 216 intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
225 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 217 if (!intel_hdmi)
226 if (!intel_encoder)
227 return; 218 return;
228 219
229 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 220 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
230 if (!intel_connector) { 221 if (!intel_connector) {
231 kfree(intel_encoder); 222 kfree(intel_hdmi);
232 return; 223 return;
233 } 224 }
234 225
235 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1); 226 intel_encoder = &intel_hdmi->base;
236
237 connector = &intel_connector->base; 227 connector = &intel_connector->base;
238 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 228 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
239 DRM_MODE_CONNECTOR_HDMIA); 229 DRM_MODE_CONNECTOR_HDMIA);
@@ -274,8 +264,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
274 if (!intel_encoder->ddc_bus) 264 if (!intel_encoder->ddc_bus)
275 goto err_connector; 265 goto err_connector;
276 266
277 hdmi_priv->sdvox_reg = sdvox_reg; 267 intel_hdmi->sdvox_reg = sdvox_reg;
278 intel_encoder->dev_priv = hdmi_priv;
279 268
280 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs, 269 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
281 DRM_MODE_ENCODER_TMDS); 270 DRM_MODE_ENCODER_TMDS);
@@ -298,7 +287,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
298 287
299err_connector: 288err_connector:
300 drm_connector_cleanup(connector); 289 drm_connector_cleanup(connector);
301 kfree(intel_encoder); 290 kfree(intel_hdmi);
302 kfree(intel_connector); 291 kfree(intel_connector);
303 292
304 return; 293 return;
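
The allocation change repeated across intel_dp, intel_hdmi and intel_dvo is the core of the refactor: the old code carved the encoder and its private data out of one untyped kcalloc and linked them by stepping past the end of the struct, while the new code expresses the relationship in the type system. Before and after, reduced to the essentials of the intel_hdmi hunk above:

        /* Before: one anonymous blob, joined by pointer arithmetic. */
        intel_encoder = kcalloc(sizeof(struct intel_encoder) +
                                sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
        hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
        intel_encoder->dev_priv = hdmi_priv;

        /* After: one typed allocation; base is a named first member, so
         * kfree() on the subclass frees everything and container_of()
         * recovers the subclass from the base safely and type-checked. */
        intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
        intel_encoder = &intel_hdmi->base;
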
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0a2e60059fb3..b819c1081147 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -41,12 +41,18 @@
41#include <linux/acpi.h> 41#include <linux/acpi.h>
42 42
43/* Private structure for the integrated LVDS support */ 43/* Private structure for the integrated LVDS support */
44struct intel_lvds_priv { 44struct intel_lvds {
45 struct intel_encoder base;
45 int fitting_mode; 46 int fitting_mode;
46 u32 pfit_control; 47 u32 pfit_control;
47 u32 pfit_pgm_ratios; 48 u32 pfit_pgm_ratios;
48}; 49};
49 50
51static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
52{
53 return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
54}
55
50/** 56/**
51 * Sets the backlight level. 57 * Sets the backlight level.
52 * 58 *
@@ -90,7 +96,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
90static void intel_lvds_set_power(struct drm_device *dev, bool on) 96static void intel_lvds_set_power(struct drm_device *dev, bool on)
91{ 97{
92 struct drm_i915_private *dev_priv = dev->dev_private; 98 struct drm_i915_private *dev_priv = dev->dev_private;
93 u32 pp_status, ctl_reg, status_reg, lvds_reg; 99 u32 ctl_reg, status_reg, lvds_reg;
94 100
95 if (HAS_PCH_SPLIT(dev)) { 101 if (HAS_PCH_SPLIT(dev)) {
96 ctl_reg = PCH_PP_CONTROL; 102 ctl_reg = PCH_PP_CONTROL;
@@ -108,9 +114,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
108 114
109 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | 115 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
110 POWER_TARGET_ON); 116 POWER_TARGET_ON);
111 do { 117 if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
112 pp_status = I915_READ(status_reg); 118 DRM_ERROR("timed out waiting to enable LVDS pipe");
113 } while ((pp_status & PP_ON) == 0);
114 119
115 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); 120 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
116 } else { 121 } else {
@@ -118,9 +123,8 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
118 123
119 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & 124 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
120 ~POWER_TARGET_ON); 125 ~POWER_TARGET_ON);
121 do { 126 if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
122 pp_status = I915_READ(status_reg); 127 DRM_ERROR("timed out waiting for LVDS pipe to turn off");
123 } while (pp_status & PP_ON);
124 128
125 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 129 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
126 POSTING_READ(lvds_reg); 130 POSTING_READ(lvds_reg);
@@ -219,9 +223,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
219 struct drm_device *dev = encoder->dev; 223 struct drm_device *dev = encoder->dev;
220 struct drm_i915_private *dev_priv = dev->dev_private; 224 struct drm_i915_private *dev_priv = dev->dev_private;
221 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 225 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
226 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
222 struct drm_encoder *tmp_encoder; 227 struct drm_encoder *tmp_encoder;
223 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
224 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
225 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 228 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
226 229
227 /* Should never happen!! */ 230 /* Should never happen!! */
@@ -241,26 +244,20 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
241 /* If we don't have a panel mode, there is nothing we can do */ 244 /* If we don't have a panel mode, there is nothing we can do */
242 if (dev_priv->panel_fixed_mode == NULL) 245 if (dev_priv->panel_fixed_mode == NULL)
243 return true; 246 return true;
247
244 /* 248 /*
245 * We have timings from the BIOS for the panel, put them in 249 * We have timings from the BIOS for the panel, put them in
246 * to the adjusted mode. The CRTC will be set up for this mode, 250 * to the adjusted mode. The CRTC will be set up for this mode,
247 * with the panel scaling set up to source from the H/VDisplay 251 * with the panel scaling set up to source from the H/VDisplay
248 * of the original mode. 252 * of the original mode.
249 */ 253 */
250 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; 254 intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
251 adjusted_mode->hsync_start = 255
252 dev_priv->panel_fixed_mode->hsync_start; 256 if (HAS_PCH_SPLIT(dev)) {
253 adjusted_mode->hsync_end = 257 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
254 dev_priv->panel_fixed_mode->hsync_end; 258 mode, adjusted_mode);
255 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; 259 return true;
256 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; 260 }
257 adjusted_mode->vsync_start =
258 dev_priv->panel_fixed_mode->vsync_start;
259 adjusted_mode->vsync_end =
260 dev_priv->panel_fixed_mode->vsync_end;
261 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
262 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
263 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
264 261
265 /* Make sure pre-965s set dither correctly */ 262 /* Make sure pre-965s set dither correctly */
266 if (!IS_I965G(dev)) { 263 if (!IS_I965G(dev)) {
@@ -273,10 +270,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
273 adjusted_mode->vdisplay == mode->vdisplay) 270 adjusted_mode->vdisplay == mode->vdisplay)
274 goto out; 271 goto out;
275 272
276 /* full screen scale for now */
277 if (HAS_PCH_SPLIT(dev))
278 goto out;
279
280 /* 965+ wants fuzzy fitting */ 273 /* 965+ wants fuzzy fitting */
281 if (IS_I965G(dev)) 274 if (IS_I965G(dev))
282 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 275 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
@@ -288,12 +281,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
288 * to register description and PRM. 281 * to register description and PRM.
289 * Change the value here to see the borders for debugging 282 * Change the value here to see the borders for debugging
290 */ 283 */
291 if (!HAS_PCH_SPLIT(dev)) { 284 I915_WRITE(BCLRPAT_A, 0);
292 I915_WRITE(BCLRPAT_A, 0); 285 I915_WRITE(BCLRPAT_B, 0);
293 I915_WRITE(BCLRPAT_B, 0);
294 }
295 286
296 switch (lvds_priv->fitting_mode) { 287 switch (intel_lvds->fitting_mode) {
297 case DRM_MODE_SCALE_CENTER: 288 case DRM_MODE_SCALE_CENTER:
298 /* 289 /*
299 * For centered modes, we have to calculate border widths & 290 * For centered modes, we have to calculate border widths &
@@ -378,8 +369,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
378 } 369 }
379 370
380out: 371out:
381 lvds_priv->pfit_control = pfit_control; 372 intel_lvds->pfit_control = pfit_control;
382 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; 373 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
383 dev_priv->lvds_border_bits = border; 374 dev_priv->lvds_border_bits = border;
384 375
385 /* 376 /*
@@ -427,8 +418,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
427{ 418{
428 struct drm_device *dev = encoder->dev; 419 struct drm_device *dev = encoder->dev;
429 struct drm_i915_private *dev_priv = dev->dev_private; 420 struct drm_i915_private *dev_priv = dev->dev_private;
430 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 421 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
431 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
432 422
433 /* 423 /*
434 * The LVDS pin pair will already have been turned on in the 424 * The LVDS pin pair will already have been turned on in the
@@ -444,8 +434,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
444 * screen. Should be enabled before the pipe is enabled, according to 434 * screen. Should be enabled before the pipe is enabled, according to
445 * register description and PRM. 435 * register description and PRM.
446 */ 436 */
447 I915_WRITE(PFIT_PGM_RATIOS, lvds_priv->pfit_pgm_ratios); 437 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
448 I915_WRITE(PFIT_CONTROL, lvds_priv->pfit_control); 438 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
449} 439}
450 440
451/** 441/**
@@ -600,18 +590,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
600 connector->encoder) { 590 connector->encoder) {
601 struct drm_crtc *crtc = connector->encoder->crtc; 591 struct drm_crtc *crtc = connector->encoder->crtc;
602 struct drm_encoder *encoder = connector->encoder; 592 struct drm_encoder *encoder = connector->encoder;
603 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 593 struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
604 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
605 594
606 if (value == DRM_MODE_SCALE_NONE) { 595 if (value == DRM_MODE_SCALE_NONE) {
607 DRM_DEBUG_KMS("no scaling not supported\n"); 596 DRM_DEBUG_KMS("no scaling not supported\n");
608 return 0; 597 return 0;
609 } 598 }
610 if (lvds_priv->fitting_mode == value) { 599 if (intel_lvds->fitting_mode == value) {
611 /* the LVDS scaling property is not changed */ 600 /* the LVDS scaling property is not changed */
612 return 0; 601 return 0;
613 } 602 }
614 lvds_priv->fitting_mode = value; 603 intel_lvds->fitting_mode = value;
615 if (crtc && crtc->enabled) { 604 if (crtc && crtc->enabled) {
616 /* 605 /*
617 * If the CRTC is enabled, the display will be changed 606 * If the CRTC is enabled, the display will be changed
@@ -647,19 +636,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
647 .destroy = intel_lvds_destroy, 636 .destroy = intel_lvds_destroy,
648}; 637};
649 638
650
651static void intel_lvds_enc_destroy(struct drm_encoder *encoder)
652{
653 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
654
655 if (intel_encoder->ddc_bus)
656 intel_i2c_destroy(intel_encoder->ddc_bus);
657 drm_encoder_cleanup(encoder);
658 kfree(intel_encoder);
659}
660
661static const struct drm_encoder_funcs intel_lvds_enc_funcs = { 639static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
662 .destroy = intel_lvds_enc_destroy, 640 .destroy = intel_encoder_destroy,
663}; 641};
664 642
665static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 643static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
@@ -843,13 +821,13 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
843void intel_lvds_init(struct drm_device *dev) 821void intel_lvds_init(struct drm_device *dev)
844{ 822{
845 struct drm_i915_private *dev_priv = dev->dev_private; 823 struct drm_i915_private *dev_priv = dev->dev_private;
824 struct intel_lvds *intel_lvds;
846 struct intel_encoder *intel_encoder; 825 struct intel_encoder *intel_encoder;
847 struct intel_connector *intel_connector; 826 struct intel_connector *intel_connector;
848 struct drm_connector *connector; 827 struct drm_connector *connector;
849 struct drm_encoder *encoder; 828 struct drm_encoder *encoder;
850 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 829 struct drm_display_mode *scan; /* *modes, *bios_mode; */
851 struct drm_crtc *crtc; 830 struct drm_crtc *crtc;
852 struct intel_lvds_priv *lvds_priv;
853 u32 lvds; 831 u32 lvds;
854 int pipe, gpio = GPIOC; 832 int pipe, gpio = GPIOC;
855 833
@@ -872,20 +850,20 @@ void intel_lvds_init(struct drm_device *dev)
872 gpio = PCH_GPIOC; 850 gpio = PCH_GPIOC;
873 } 851 }
874 852
875 intel_encoder = kzalloc(sizeof(struct intel_encoder) + 853 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
876 sizeof(struct intel_lvds_priv), GFP_KERNEL); 854 if (!intel_lvds) {
877 if (!intel_encoder) {
878 return; 855 return;
879 } 856 }
880 857
881 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 858 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
882 if (!intel_connector) { 859 if (!intel_connector) {
883 kfree(intel_encoder); 860 kfree(intel_lvds);
884 return; 861 return;
885 } 862 }
886 863
887 connector = &intel_connector->base; 864 intel_encoder = &intel_lvds->base;
888 encoder = &intel_encoder->enc; 865 encoder = &intel_encoder->enc;
866 connector = &intel_connector->base;
889 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 867 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
890 DRM_MODE_CONNECTOR_LVDS); 868 DRM_MODE_CONNECTOR_LVDS);
891 869
@@ -905,8 +883,6 @@ void intel_lvds_init(struct drm_device *dev)
905 connector->interlace_allowed = false; 883 connector->interlace_allowed = false;
906 connector->doublescan_allowed = false; 884 connector->doublescan_allowed = false;
907 885
908 lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
909 intel_encoder->dev_priv = lvds_priv;
910 /* create the scaling mode property */ 886 /* create the scaling mode property */
911 drm_mode_create_scaling_mode_property(dev); 887 drm_mode_create_scaling_mode_property(dev);
912 /* 888 /*
@@ -916,7 +892,7 @@ void intel_lvds_init(struct drm_device *dev)
916 drm_connector_attach_property(&intel_connector->base, 892 drm_connector_attach_property(&intel_connector->base,
917 dev->mode_config.scaling_mode_property, 893 dev->mode_config.scaling_mode_property,
918 DRM_MODE_SCALE_ASPECT); 894 DRM_MODE_SCALE_ASPECT);
919 lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT; 895 intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
920 /* 896 /*
921 * LVDS discovery: 897 * LVDS discovery:
922 * 1) check for EDID on DDC 898 * 1) check for EDID on DDC
@@ -1024,6 +1000,6 @@ failed:
1024 intel_i2c_destroy(intel_encoder->ddc_bus); 1000 intel_i2c_destroy(intel_encoder->ddc_bus);
1025 drm_connector_cleanup(connector); 1001 drm_connector_cleanup(connector);
1026 drm_encoder_cleanup(encoder); 1002 drm_encoder_cleanup(encoder);
1027 kfree(intel_encoder); 1003 kfree(intel_lvds);
1028 kfree(intel_connector); 1004 kfree(intel_connector);
1029} 1005}
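
The LVDS refactor above replaces the old tail allocation (an intel_encoder with an intel_lvds_priv glued on behind it via dev_priv) with a struct intel_lvds that embeds its intel_encoder as the first member, so enc_to_intel_lvds() can recover the outer object with container_of(). A minimal standalone sketch of the idiom, assuming nothing beyond the C standard library; the type names are illustrative stand-ins, not the driver's:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct encoder { int id; };              /* stand-in for intel_encoder */

struct lvds {
        struct encoder base;             /* embedded base, not a pointer */
        int fitting_mode;
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct lvds *to_lvds(struct encoder *enc)
{
        return container_of(enc, struct lvds, base);
}

int main(void)
{
        struct lvds *lvds = calloc(1, sizeof(*lvds)); /* one allocation, like kzalloc */
        struct encoder *enc;

        if (!lvds)
                return 1;
        enc = &lvds->base;                   /* downstream code only sees the base */
        to_lvds(enc)->fitting_mode = 3;      /* upcast back to the container */
        printf("fitting_mode=%d\n", lvds->fitting_mode);
        free(lvds);
        return 0;
}

Compared with the old (struct intel_lvds_priv *)(intel_encoder + 1) arithmetic, the embedded-base layout is type-checked and survives reordering of the outer struct's members.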
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d39aea24eabe..1d306a458be6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,6 +25,8 @@
25 * 25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c 26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */ 27 */
28
29#include <linux/seq_file.h>
28#include "drmP.h" 30#include "drmP.h"
29#include "drm.h" 31#include "drm.h"
30#include "i915_drm.h" 32#include "i915_drm.h"
@@ -1367,7 +1369,8 @@ void intel_setup_overlay(struct drm_device *dev)
1367 overlay->flip_addr = overlay->reg_bo->gtt_offset; 1369 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1368 } else { 1370 } else {
1369 ret = i915_gem_attach_phys_object(dev, reg_bo, 1371 ret = i915_gem_attach_phys_object(dev, reg_bo,
1370 I915_GEM_PHYS_OVERLAY_REGS); 1372 I915_GEM_PHYS_OVERLAY_REGS,
1373 0);
1371 if (ret) { 1374 if (ret) {
1372 DRM_ERROR("failed to attach phys overlay regs\n"); 1375 DRM_ERROR("failed to attach phys overlay regs\n");
1373 goto out_free_bo; 1376 goto out_free_bo;
@@ -1416,3 +1419,99 @@ void intel_cleanup_overlay(struct drm_device *dev)
1416 kfree(dev_priv->overlay); 1419 kfree(dev_priv->overlay);
1417 } 1420 }
1418} 1421}
1422
1423struct intel_overlay_error_state {
1424 struct overlay_registers regs;
1425 unsigned long base;
1426 u32 dovsta;
1427 u32 isr;
1428};
1429
1430struct intel_overlay_error_state *
1431intel_overlay_capture_error_state(struct drm_device *dev)
1432{
1433 drm_i915_private_t *dev_priv = dev->dev_private;
1434 struct intel_overlay *overlay = dev_priv->overlay;
1435 struct intel_overlay_error_state *error;
1436 struct overlay_registers __iomem *regs;
1437
1438 if (!overlay || !overlay->active)
1439 return NULL;
1440
1441 error = kmalloc(sizeof(*error), GFP_ATOMIC);
1442 if (error == NULL)
1443 return NULL;
1444
1445 error->dovsta = I915_READ(DOVSTA);
1446 error->isr = I915_READ(ISR);
1447 if (OVERLAY_NONPHYSICAL(overlay->dev))
1448 error->base = (long) overlay->reg_bo->gtt_offset;
1449 else
1450 error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
1451
1452 regs = intel_overlay_map_regs_atomic(overlay);
1453 if (!regs)
1454 goto err;
1455
1456 memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
1457 intel_overlay_unmap_regs_atomic(overlay);
1458
1459 return error;
1460
1461err:
1462 kfree(error);
1463 return NULL;
1464}
1465
1466void
1467intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error)
1468{
1469 seq_printf(m, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
1470 error->dovsta, error->isr);
1471 seq_printf(m, " Register file at 0x%08lx:\n",
1472 error->base);
1473
1474#define P(x) seq_printf(m, " " #x ": 0x%08x\n", error->regs.x)
1475 P(OBUF_0Y);
1476 P(OBUF_1Y);
1477 P(OBUF_0U);
1478 P(OBUF_0V);
1479 P(OBUF_1U);
1480 P(OBUF_1V);
1481 P(OSTRIDE);
1482 P(YRGB_VPH);
1483 P(UV_VPH);
1484 P(HORZ_PH);
1485 P(INIT_PHS);
1486 P(DWINPOS);
1487 P(DWINSZ);
1488 P(SWIDTH);
1489 P(SWIDTHSW);
1490 P(SHEIGHT);
1491 P(YRGBSCALE);
1492 P(UVSCALE);
1493 P(OCLRC0);
1494 P(OCLRC1);
1495 P(DCLRKV);
1496 P(DCLRKM);
1497 P(SCLRKVH);
1498 P(SCLRKVL);
1499 P(SCLRKEN);
1500 P(OCONFIG);
1501 P(OCMD);
1502 P(OSTART_0Y);
1503 P(OSTART_1Y);
1504 P(OSTART_0U);
1505 P(OSTART_0V);
1506 P(OSTART_1U);
1507 P(OSTART_1V);
1508 P(OTILEOFF_0Y);
1509 P(OTILEOFF_1Y);
1510 P(OTILEOFF_0U);
1511 P(OTILEOFF_0V);
1512 P(OTILEOFF_1U);
1513 P(OTILEOFF_1V);
1514 P(FASTHSCALE);
1515 P(UVSCALEV);
1516#undef P
1517}
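
The new error-state dump leans on the preprocessor's stringizing operator: the local P(x) macro prints each overlay register field's name (via #x) next to its value, and is #undef'd immediately after so it stays file-local. A compilable sketch of the same trick, with a trimmed stand-in register file:

#include <stdio.h>

struct regs {
        unsigned int OBUF_0Y;
        unsigned int OSTRIDE;
        unsigned int OCMD;
};

static void dump(const struct regs *r)
{
#define P(x) printf("  " #x ": 0x%08x\n", r->x)  /* #x becomes the literal field name */
        P(OBUF_0Y);
        P(OSTRIDE);
        P(OCMD);
#undef P                                         /* keep the helper strictly local */
}

int main(void)
{
        struct regs r = { 0x10000, 0x500, 0x84000000 };
        dump(&r);
        return 0;
}

One macro definition keeps the 40-odd register lines in intel_overlay_print_error_state() free of copy-paste drift between a field's printed name and the field actually read.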
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
new file mode 100644
index 000000000000..e7f5299d9d57
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright © 2006-2010 Intel Corporation
3 * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Eric Anholt <eric@anholt.net>
26 * Dave Airlie <airlied@linux.ie>
27 * Jesse Barnes <jesse.barnes@intel.com>
28 * Chris Wilson <chris@chris-wilson.co.uk>
29 */
30
31#include "intel_drv.h"
32
33void
34intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
35 struct drm_display_mode *adjusted_mode)
36{
37 adjusted_mode->hdisplay = fixed_mode->hdisplay;
38 adjusted_mode->hsync_start = fixed_mode->hsync_start;
39 adjusted_mode->hsync_end = fixed_mode->hsync_end;
40 adjusted_mode->htotal = fixed_mode->htotal;
41
42 adjusted_mode->vdisplay = fixed_mode->vdisplay;
43 adjusted_mode->vsync_start = fixed_mode->vsync_start;
44 adjusted_mode->vsync_end = fixed_mode->vsync_end;
45 adjusted_mode->vtotal = fixed_mode->vtotal;
46
47 adjusted_mode->clock = fixed_mode->clock;
48
49 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
50}
51
52/* adjusted_mode has been preset to be the panel's fixed mode */
53void
54intel_pch_panel_fitting(struct drm_device *dev,
55 int fitting_mode,
56 struct drm_display_mode *mode,
57 struct drm_display_mode *adjusted_mode)
58{
59 struct drm_i915_private *dev_priv = dev->dev_private;
60 int x, y, width, height;
61
62 x = y = width = height = 0;
63
64 /* Native modes don't need fitting */
65 if (adjusted_mode->hdisplay == mode->hdisplay &&
66 adjusted_mode->vdisplay == mode->vdisplay)
67 goto done;
68
69 switch (fitting_mode) {
70 case DRM_MODE_SCALE_CENTER:
71 width = mode->hdisplay;
72 height = mode->vdisplay;
73 x = (adjusted_mode->hdisplay - width + 1)/2;
74 y = (adjusted_mode->vdisplay - height + 1)/2;
75 break;
76
77 case DRM_MODE_SCALE_ASPECT:
78 /* Scale but preserve the aspect ratio */
79 {
80 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
81 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
82 if (scaled_width > scaled_height) { /* pillar */
83 width = scaled_height / mode->vdisplay;
84 x = (adjusted_mode->hdisplay - width + 1) / 2;
85 y = 0;
86 height = adjusted_mode->vdisplay;
87 } else if (scaled_width < scaled_height) { /* letter */
88 height = scaled_width / mode->hdisplay;
89 y = (adjusted_mode->vdisplay - height + 1) / 2;
90 x = 0;
91 width = adjusted_mode->hdisplay;
92 } else {
93 x = y = 0;
94 width = adjusted_mode->hdisplay;
95 height = adjusted_mode->vdisplay;
96 }
97 }
98 break;
99
100 default:
101 case DRM_MODE_SCALE_FULLSCREEN:
102 x = y = 0;
103 width = adjusted_mode->hdisplay;
104 height = adjusted_mode->vdisplay;
105 break;
106 }
107
108done:
109 dev_priv->pch_pf_pos = (x << 16) | y;
110 dev_priv->pch_pf_size = (width << 16) | height;
111}
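
intel_pch_panel_fitting() decides between pillarbox and letterbox by comparing the cross-products adjusted->hdisplay * mode->vdisplay and mode->hdisplay * adjusted->vdisplay, which compares the two aspect ratios exactly without any division; the "+ 1" in the centering terms rounds the border split up when it is odd. A standalone sketch of the same arithmetic, using made-up source and panel sizes:

#include <stdio.h>

/* Fit a sw x sh source into a dw x dh panel, preserving aspect ratio. */
static void fit(unsigned sw, unsigned sh, unsigned dw, unsigned dh)
{
        unsigned x, y, w, h;
        unsigned long a = (unsigned long)dw * sh;  /* panel width x source height */
        unsigned long b = (unsigned long)sw * dh;  /* source width x panel height */

        if (a > b) {            /* panel is wider than the source: pillarbox */
                w = b / sh;
                h = dh;
                x = (dw - w + 1) / 2;
                y = 0;
        } else if (a < b) {     /* panel is taller than the source: letterbox */
                h = a / sw;
                w = dw;
                x = 0;
                y = (dh - h + 1) / 2;
        } else {                /* same aspect: fill the panel */
                x = y = 0;
                w = dw;
                h = dh;
        }
        printf("%ux%u on %ux%u -> pos (%u,%u), size %ux%u\n",
               sw, sh, dw, dh, x, y, w, h);
}

int main(void)
{
        fit(1024, 768, 1920, 1080);  /* 4:3 on 16:9 -> 1440x1080 at x=240 */
        fit(1280, 720, 1024, 768);   /* 16:9 on 4:3 -> 1024x576 at y=96 */
        return 0;
}

The computed position and size are then packed as (x << 16) | y and (width << 16) | height into dev_priv->pch_pf_pos and pch_pf_size, as the done: label above shows.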
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8495a8..cb3508f78bc3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,18 +33,35 @@
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static u32 i915_gem_get_seqno(struct drm_device *dev)
37{
38 drm_i915_private_t *dev_priv = dev->dev_private;
39 u32 seqno;
40
41 seqno = dev_priv->next_seqno;
42
43 /* reserve 0 for non-seqno */
44 if (++dev_priv->next_seqno == 0)
45 dev_priv->next_seqno = 1;
46
47 return seqno;
48}
49
36static void 50static void
37render_ring_flush(struct drm_device *dev, 51render_ring_flush(struct drm_device *dev,
38 struct intel_ring_buffer *ring, 52 struct intel_ring_buffer *ring,
39 u32 invalidate_domains, 53 u32 invalidate_domains,
40 u32 flush_domains) 54 u32 flush_domains)
41{ 55{
56 drm_i915_private_t *dev_priv = dev->dev_private;
57 u32 cmd;
58
42#if WATCH_EXEC 59#if WATCH_EXEC
43 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 60 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
44 invalidate_domains, flush_domains); 61 invalidate_domains, flush_domains);
45#endif 62#endif
46 u32 cmd; 63
47 trace_i915_gem_request_flush(dev, ring->next_seqno, 64 trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
48 invalidate_domains, flush_domains); 65 invalidate_domains, flush_domains);
49 66
50 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { 67 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
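
Seqno allocation moves out of each ring (the old ring->next_seqno) into a single device-wide counter, dev_priv->next_seqno, and the allocator skips 0 on wrap-around so a zero seqno can keep meaning "no request". A minimal sketch of the wrap-safe counter, with a forced overflow to show the reserved value being skipped:

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;        /* 0 is reserved for "no seqno" */

static uint32_t get_seqno(void)
{
        uint32_t seqno = next_seqno;

        if (++next_seqno == 0)         /* wrapped past UINT32_MAX: skip 0 */
                next_seqno = 1;
        return seqno;
}

int main(void)
{
        next_seqno = 0xffffffffu;      /* force the wrap on the next call */
        printf("%u\n", get_seqno());   /* 4294967295 */
        printf("%u\n", get_seqno());   /* 1, never 0 */
        return 0;
}

With one counter shared by the render and bsd rings, a seqno now identifies a request globally instead of only within its own ring.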
@@ -203,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
203{ 220{
204 drm_i915_private_t *dev_priv = dev->dev_private; 221 drm_i915_private_t *dev_priv = dev->dev_private;
205 int ret = init_ring_common(dev, ring); 222 int ret = init_ring_common(dev, ring);
223 int mode;
224
206 if (IS_I9XX(dev) && !IS_GEN3(dev)) { 225 if (IS_I9XX(dev) && !IS_GEN3(dev)) {
207 I915_WRITE(MI_MODE, 226 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
208 (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); 227 if (IS_GEN6(dev))
228 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
229 I915_WRITE(MI_MODE, mode);
209 } 230 }
210 return ret; 231 return ret;
211} 232}
@@ -233,9 +254,10 @@ render_ring_add_request(struct drm_device *dev,
233 struct drm_file *file_priv, 254 struct drm_file *file_priv,
234 u32 flush_domains) 255 u32 flush_domains)
235{ 256{
236 u32 seqno;
237 drm_i915_private_t *dev_priv = dev->dev_private; 257 drm_i915_private_t *dev_priv = dev->dev_private;
238 seqno = intel_ring_get_seqno(dev, ring); 258 u32 seqno;
259
260 seqno = i915_gem_get_seqno(dev);
239 261
240 if (IS_GEN6(dev)) { 262 if (IS_GEN6(dev)) {
241 BEGIN_LP_RING(6); 263 BEGIN_LP_RING(6);
@@ -405,7 +427,9 @@ bsd_ring_add_request(struct drm_device *dev,
405 u32 flush_domains) 427 u32 flush_domains)
406{ 428{
407 u32 seqno; 429 u32 seqno;
408 seqno = intel_ring_get_seqno(dev, ring); 430
431 seqno = i915_gem_get_seqno(dev);
432
409 intel_ring_begin(dev, ring, 4); 433 intel_ring_begin(dev, ring, 4);
410 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); 434 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
411 intel_ring_emit(dev, ring, 435 intel_ring_emit(dev, ring,
@@ -479,7 +503,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
479 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 503 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
480 exec_len = (uint32_t) exec->batch_len; 504 exec_len = (uint32_t) exec->batch_len;
481 505
482 trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1); 506 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
483 507
484 count = nbox ? nbox : 1; 508 count = nbox ? nbox : 1;
485 509
@@ -515,7 +539,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
515 intel_ring_advance(dev, ring); 539 intel_ring_advance(dev, ring);
516 } 540 }
517 541
542 if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
543 intel_ring_begin(dev, ring, 2);
544 intel_ring_emit(dev, ring, MI_FLUSH |
545 MI_NO_WRITE_FLUSH |
546 MI_INVALIDATE_ISP );
547 intel_ring_emit(dev, ring, MI_NOOP);
548 intel_ring_advance(dev, ring);
549 }
518 /* XXX breadcrumb */ 550 /* XXX breadcrumb */
551
519 return 0; 552 return 0;
520} 553}
521 554
@@ -588,9 +621,10 @@ err:
588int intel_init_ring_buffer(struct drm_device *dev, 621int intel_init_ring_buffer(struct drm_device *dev,
589 struct intel_ring_buffer *ring) 622 struct intel_ring_buffer *ring)
590{ 623{
591 int ret;
592 struct drm_i915_gem_object *obj_priv; 624 struct drm_i915_gem_object *obj_priv;
593 struct drm_gem_object *obj; 625 struct drm_gem_object *obj;
626 int ret;
627
594 ring->dev = dev; 628 ring->dev = dev;
595 629
596 if (I915_NEED_GFX_HWS(dev)) { 630 if (I915_NEED_GFX_HWS(dev)) {
@@ -603,16 +637,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
603 if (obj == NULL) { 637 if (obj == NULL) {
604 DRM_ERROR("Failed to allocate ringbuffer\n"); 638 DRM_ERROR("Failed to allocate ringbuffer\n");
605 ret = -ENOMEM; 639 ret = -ENOMEM;
606 goto cleanup; 640 goto err_hws;
607 } 641 }
608 642
609 ring->gem_object = obj; 643 ring->gem_object = obj;
610 644
611 ret = i915_gem_object_pin(obj, ring->alignment); 645 ret = i915_gem_object_pin(obj, ring->alignment);
612 if (ret != 0) { 646 if (ret)
613 drm_gem_object_unreference(obj); 647 goto err_unref;
614 goto cleanup;
615 }
616 648
617 obj_priv = to_intel_bo(obj); 649 obj_priv = to_intel_bo(obj);
618 ring->map.size = ring->size; 650 ring->map.size = ring->size;
@@ -624,18 +656,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
624 drm_core_ioremap_wc(&ring->map, dev); 656 drm_core_ioremap_wc(&ring->map, dev);
625 if (ring->map.handle == NULL) { 657 if (ring->map.handle == NULL) {
626 DRM_ERROR("Failed to map ringbuffer.\n"); 658 DRM_ERROR("Failed to map ringbuffer.\n");
627 i915_gem_object_unpin(obj);
628 drm_gem_object_unreference(obj);
629 ret = -EINVAL; 659 ret = -EINVAL;
630 goto cleanup; 660 goto err_unpin;
631 } 661 }
632 662
633 ring->virtual_start = ring->map.handle; 663 ring->virtual_start = ring->map.handle;
634 ret = ring->init(dev, ring); 664 ret = ring->init(dev, ring);
635 if (ret != 0) { 665 if (ret)
636 intel_cleanup_ring_buffer(dev, ring); 666 goto err_unmap;
637 return ret;
638 }
639 667
640 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 668 if (!drm_core_check_feature(dev, DRIVER_MODESET))
641 i915_kernel_lost_context(dev); 669 i915_kernel_lost_context(dev);
@@ -649,7 +677,15 @@ int intel_init_ring_buffer(struct drm_device *dev,
649 INIT_LIST_HEAD(&ring->active_list); 677 INIT_LIST_HEAD(&ring->active_list);
650 INIT_LIST_HEAD(&ring->request_list); 678 INIT_LIST_HEAD(&ring->request_list);
651 return ret; 679 return ret;
652cleanup: 680
681err_unmap:
682 drm_core_ioremapfree(&ring->map, dev);
683err_unpin:
684 i915_gem_object_unpin(obj);
685err_unref:
686 drm_gem_object_unreference(obj);
687 ring->gem_object = NULL;
688err_hws:
653 cleanup_status_page(dev, ring); 689 cleanup_status_page(dev, ring);
654 return ret; 690 return ret;
655} 691}
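
intel_init_ring_buffer() is reworked into the kernel's standard goto-unwind ladder: each acquisition failure jumps to a label (err_unmap, err_unpin, err_unref, err_hws) that releases exactly the resources obtained so far, in reverse order, instead of each branch hand-rolling its own partial teardown before jumping to a catch-all cleanup label. A generic sketch of the idiom with three stand-in resources:

#include <stdlib.h>
#include <stdio.h>

static int init_thing(void)
{
        char *a, *b, *c;
        int ret = -1;

        a = malloc(16);
        if (!a)
                goto err_none;
        b = malloc(16);
        if (!b)
                goto err_a;              /* only 'a' is held at this point */
        c = malloc(16);
        if (!c)
                goto err_b;              /* 'a' and 'b' are held */

        puts("all resources acquired");
        free(c);
        free(b);
        free(a);
        return 0;

err_b:                                   /* unwind in reverse acquisition order */
        free(b);
err_a:
        free(a);
err_none:
        return ret;
}

int main(void)
{
        return init_thing();
}

Each label names the last resource still held, so adding a new setup step means adding one label rather than auditing every failure branch.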
@@ -682,9 +718,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
682 } 718 }
683 719
684 virt = (unsigned int *)(ring->virtual_start + ring->tail); 720 virt = (unsigned int *)(ring->virtual_start + ring->tail);
685 rem /= 4; 721 rem /= 8;
686 while (rem--) 722 while (rem--) {
723 *virt++ = MI_NOOP;
687 *virt++ = MI_NOOP; 724 *virt++ = MI_NOOP;
725 }
688 726
689 ring->tail = 0; 727 ring->tail = 0;
690 ring->space = ring->head - 8; 728 ring->space = ring->head - 8;
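
intel_wrap_ring_buffer() pads the tail-to-end gap with MI_NOOP dwords before wrapping; the rewrite divides the remaining byte count by 8 and emits two 4-byte no-ops per iteration, halving the loop trips while filling the same bytes. This assumes the gap is always 8-byte aligned, which holds for the ring sizes and packet alignment the driver uses. A standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MI_NOOP   0u
#define RING_SIZE 64u                          /* toy ring; real rings are pages */

int main(void)
{
        uint32_t ring[RING_SIZE / 4];
        unsigned int tail = 40;                /* byte offset of the write pointer */
        unsigned int rem = RING_SIZE - tail;   /* bytes left before the end */
        uint32_t *virt = ring + tail / 4;

        memset(ring, 0xff, sizeof(ring));
        rem /= 8;                              /* two dwords written per pass */
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }
        tail = 0;                              /* next packet starts at the top */

        printf("padded %u bytes, tail now %u\n", RING_SIZE - 40, tail);
        return 0;
}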
@@ -729,21 +767,14 @@ void intel_ring_begin(struct drm_device *dev,
729 intel_wrap_ring_buffer(dev, ring); 767 intel_wrap_ring_buffer(dev, ring);
730 if (unlikely(ring->space < n)) 768 if (unlikely(ring->space < n))
731 intel_wait_ring_buffer(dev, ring, n); 769 intel_wait_ring_buffer(dev, ring, n);
732}
733 770
734void intel_ring_emit(struct drm_device *dev, 771 ring->space -= n;
735 struct intel_ring_buffer *ring, unsigned int data)
736{
737 unsigned int *virt = ring->virtual_start + ring->tail;
738 *virt = data;
739 ring->tail += 4;
740 ring->tail &= ring->size - 1;
741 ring->space -= 4;
742} 772}
743 773
744void intel_ring_advance(struct drm_device *dev, 774void intel_ring_advance(struct drm_device *dev,
745 struct intel_ring_buffer *ring) 775 struct intel_ring_buffer *ring)
746{ 776{
777 ring->tail &= ring->size - 1;
747 ring->advance_ring(dev, ring); 778 ring->advance_ring(dev, ring);
748} 779}
749 780
@@ -762,18 +793,6 @@ void intel_fill_struct(struct drm_device *dev,
762 intel_ring_advance(dev, ring); 793 intel_ring_advance(dev, ring);
763} 794}
764 795
765u32 intel_ring_get_seqno(struct drm_device *dev,
766 struct intel_ring_buffer *ring)
767{
768 u32 seqno;
769 seqno = ring->next_seqno;
770
771 /* reserve 0 for non-seqno */
772 if (++ring->next_seqno == 0)
773 ring->next_seqno = 1;
774 return seqno;
775}
776
777struct intel_ring_buffer render_ring = { 796struct intel_ring_buffer render_ring = {
778 .name = "render ring", 797 .name = "render ring",
779 .regs = { 798 .regs = {
@@ -791,7 +810,6 @@ struct intel_ring_buffer render_ring = {
791 .head = 0, 810 .head = 0,
792 .tail = 0, 811 .tail = 0,
793 .space = 0, 812 .space = 0,
794 .next_seqno = 1,
795 .user_irq_refcount = 0, 813 .user_irq_refcount = 0,
796 .irq_gem_seqno = 0, 814 .irq_gem_seqno = 0,
797 .waiting_gem_seqno = 0, 815 .waiting_gem_seqno = 0,
@@ -830,7 +848,6 @@ struct intel_ring_buffer bsd_ring = {
830 .head = 0, 848 .head = 0,
831 .tail = 0, 849 .tail = 0,
832 .space = 0, 850 .space = 0,
833 .next_seqno = 1,
834 .user_irq_refcount = 0, 851 .user_irq_refcount = 0,
835 .irq_gem_seqno = 0, 852 .irq_gem_seqno = 0,
836 .waiting_gem_seqno = 0, 853 .waiting_gem_seqno = 0,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d5568d3766de..525e7d3edda8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -26,7 +26,6 @@ struct intel_ring_buffer {
26 unsigned int head; 26 unsigned int head;
27 unsigned int tail; 27 unsigned int tail;
28 unsigned int space; 28 unsigned int space;
29 u32 next_seqno;
30 struct intel_hw_status_page status_page; 29 struct intel_hw_status_page status_page;
31 30
 32 u32 irq_gem_seqno; /* last seq seen at irq time */ 31
@@ -106,8 +105,16 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
106 struct intel_ring_buffer *ring); 105 struct intel_ring_buffer *ring);
107void intel_ring_begin(struct drm_device *dev, 106void intel_ring_begin(struct drm_device *dev,
108 struct intel_ring_buffer *ring, int n); 107 struct intel_ring_buffer *ring, int n);
109void intel_ring_emit(struct drm_device *dev, 108
110 struct intel_ring_buffer *ring, u32 data); 109static inline void intel_ring_emit(struct drm_device *dev,
110 struct intel_ring_buffer *ring,
111 unsigned int data)
112{
113 unsigned int *virt = ring->virtual_start + ring->tail;
114 *virt = data;
115 ring->tail += 4;
116}
117
111void intel_fill_struct(struct drm_device *dev, 118void intel_fill_struct(struct drm_device *dev,
112 struct intel_ring_buffer *ring, 119 struct intel_ring_buffer *ring,
113 void *data, 120 void *data,
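
Moving intel_ring_emit() into the header as a static inline removes a function call per emitted dword, and also strips the per-dword tail masking and space accounting: intel_ring_begin() now debits ring->space once for the whole packet, and intel_ring_advance() masks the tail against the power-of-two ring size once per packet. A sketch of that cost split, assuming, as the driver does, that begin() has already wrapped the ring so a packet never straddles the end:

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint8_t      *virtual_start;
        unsigned int  tail, size, space;  /* size must be a power of two */
};

/* hot path: one store and one add per dword, nothing else */
static inline void ring_emit(struct ring *r, uint32_t data)
{
        *(uint32_t *)(r->virtual_start + r->tail) = data;
        r->tail += 4;
}

static void ring_begin(struct ring *r, unsigned int n_bytes)
{
        r->space -= n_bytes;              /* account once per packet */
}

static void ring_advance(struct ring *r)
{
        r->tail &= r->size - 1;           /* wrap once per packet */
}

int main(void)
{
        uint32_t buf[16] = { 0 };
        struct ring r = { (uint8_t *)buf, 56, sizeof(buf), sizeof(buf) };

        ring_begin(&r, 8);
        ring_emit(&r, 0x1);
        ring_emit(&r, 0x2);
        ring_advance(&r);
        printf("tail=%u space=%u\n", r.tail, r.space);  /* tail=0 space=56 */
        return 0;
}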
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d9d4d51aa89e..e3b7a7ee39cb 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -31,8 +31,8 @@
31#include "drmP.h" 31#include "drmP.h"
32#include "drm.h" 32#include "drm.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "intel_drv.h"
35#include "drm_edid.h" 34#include "drm_edid.h"
35#include "intel_drv.h"
36#include "i915_drm.h" 36#include "i915_drm.h"
37#include "i915_drv.h" 37#include "i915_drv.h"
38#include "intel_sdvo_regs.h" 38#include "intel_sdvo_regs.h"
@@ -47,9 +47,10 @@
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
50 51
51 52
52static char *tv_format_names[] = { 53static const char *tv_format_names[] = {
53 "NTSC_M" , "NTSC_J" , "NTSC_443", 54 "NTSC_M" , "NTSC_J" , "NTSC_443",
54 "PAL_B" , "PAL_D" , "PAL_G" , 55 "PAL_B" , "PAL_D" , "PAL_G" ,
55 "PAL_H" , "PAL_I" , "PAL_M" , 56 "PAL_H" , "PAL_I" , "PAL_M" ,
@@ -61,7 +62,9 @@ static char *tv_format_names[] = {
61 62
62#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names)) 63#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
63 64
64struct intel_sdvo_priv { 65struct intel_sdvo {
66 struct intel_encoder base;
67
65 u8 slave_addr; 68 u8 slave_addr;
66 69
67 /* Register for the SDVO device: SDVOB or SDVOC */ 70 /* Register for the SDVO device: SDVOB or SDVOC */
@@ -95,7 +98,7 @@ struct intel_sdvo_priv {
95 bool is_tv; 98 bool is_tv;
96 99
97 /* This is for current tv format name */ 100 /* This is for current tv format name */
98 char *tv_format_name; 101 int tv_format_index;
99 102
100 /** 103 /**
101 * This is set if we treat the device as HDMI, instead of DVI. 104 * This is set if we treat the device as HDMI, instead of DVI.
@@ -132,37 +135,40 @@ struct intel_sdvo_priv {
132}; 135};
133 136
134struct intel_sdvo_connector { 137struct intel_sdvo_connector {
138 struct intel_connector base;
139
135 /* Mark the type of connector */ 140 /* Mark the type of connector */
136 uint16_t output_flag; 141 uint16_t output_flag;
137 142
138 /* This contains all current supported TV format */ 143 /* This contains all current supported TV format */
139 char *tv_format_supported[TV_FORMAT_NUM]; 144 u8 tv_format_supported[TV_FORMAT_NUM];
140 int format_supported_num; 145 int format_supported_num;
141 struct drm_property *tv_format_property; 146 struct drm_property *tv_format;
142 struct drm_property *tv_format_name_property[TV_FORMAT_NUM];
143
144 /**
145 * Returned SDTV resolutions allowed for the current format, if the
146 * device reported it.
147 */
148 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
149 147
150 /* add the property for the SDVO-TV */ 148 /* add the property for the SDVO-TV */
151 struct drm_property *left_property; 149 struct drm_property *left;
152 struct drm_property *right_property; 150 struct drm_property *right;
153 struct drm_property *top_property; 151 struct drm_property *top;
154 struct drm_property *bottom_property; 152 struct drm_property *bottom;
155 struct drm_property *hpos_property; 153 struct drm_property *hpos;
156 struct drm_property *vpos_property; 154 struct drm_property *vpos;
155 struct drm_property *contrast;
156 struct drm_property *saturation;
157 struct drm_property *hue;
158 struct drm_property *sharpness;
159 struct drm_property *flicker_filter;
160 struct drm_property *flicker_filter_adaptive;
161 struct drm_property *flicker_filter_2d;
162 struct drm_property *tv_chroma_filter;
163 struct drm_property *tv_luma_filter;
164 struct drm_property *dot_crawl;
157 165
158 /* add the property for the SDVO-TV/LVDS */ 166 /* add the property for the SDVO-TV/LVDS */
159 struct drm_property *brightness_property; 167 struct drm_property *brightness;
160 struct drm_property *contrast_property;
161 struct drm_property *saturation_property;
162 struct drm_property *hue_property;
163 168
164 /* Add variable to record current setting for the above property */ 169 /* Add variable to record current setting for the above property */
165 u32 left_margin, right_margin, top_margin, bottom_margin; 170 u32 left_margin, right_margin, top_margin, bottom_margin;
171
166 /* this is to get the range of margin.*/ 172 /* this is to get the range of margin.*/
167 u32 max_hscan, max_vscan; 173 u32 max_hscan, max_vscan;
168 u32 max_hpos, cur_hpos; 174 u32 max_hpos, cur_hpos;
@@ -171,36 +177,54 @@ struct intel_sdvo_connector {
171 u32 cur_contrast, max_contrast; 177 u32 cur_contrast, max_contrast;
172 u32 cur_saturation, max_saturation; 178 u32 cur_saturation, max_saturation;
173 u32 cur_hue, max_hue; 179 u32 cur_hue, max_hue;
180 u32 cur_sharpness, max_sharpness;
181 u32 cur_flicker_filter, max_flicker_filter;
182 u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
183 u32 cur_flicker_filter_2d, max_flicker_filter_2d;
184 u32 cur_tv_chroma_filter, max_tv_chroma_filter;
185 u32 cur_tv_luma_filter, max_tv_luma_filter;
186 u32 cur_dot_crawl, max_dot_crawl;
174}; 187};
175 188
189static struct intel_sdvo *enc_to_intel_sdvo(struct drm_encoder *encoder)
190{
191 return container_of(enc_to_intel_encoder(encoder), struct intel_sdvo, base);
192}
193
194static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
195{
196 return container_of(to_intel_connector(connector), struct intel_sdvo_connector, base);
197}
198
176static bool 199static bool
177intel_sdvo_output_setup(struct intel_encoder *intel_encoder, 200intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
178 uint16_t flags); 201static bool
179static void 202intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
180intel_sdvo_tv_create_property(struct drm_connector *connector, int type); 203 struct intel_sdvo_connector *intel_sdvo_connector,
181static void 204 int type);
182intel_sdvo_create_enhance_property(struct drm_connector *connector); 205static bool
206intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
207 struct intel_sdvo_connector *intel_sdvo_connector);
183 208
184/** 209/**
185 * Writes the SDVOB or SDVOC with the given value, but always writes both 210 * Writes the SDVOB or SDVOC with the given value, but always writes both
186 * SDVOB and SDVOC to work around apparent hardware issues (according to 211 * SDVOB and SDVOC to work around apparent hardware issues (according to
187 * comments in the BIOS). 212 * comments in the BIOS).
188 */ 213 */
189static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val) 214static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
190{ 215{
191 struct drm_device *dev = intel_encoder->enc.dev; 216 struct drm_device *dev = intel_sdvo->base.enc.dev;
192 struct drm_i915_private *dev_priv = dev->dev_private; 217 struct drm_i915_private *dev_priv = dev->dev_private;
193 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
194 u32 bval = val, cval = val; 218 u32 bval = val, cval = val;
195 int i; 219 int i;
196 220
197 if (sdvo_priv->sdvo_reg == PCH_SDVOB) { 221 if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
198 I915_WRITE(sdvo_priv->sdvo_reg, val); 222 I915_WRITE(intel_sdvo->sdvo_reg, val);
199 I915_READ(sdvo_priv->sdvo_reg); 223 I915_READ(intel_sdvo->sdvo_reg);
200 return; 224 return;
201 } 225 }
202 226
203 if (sdvo_priv->sdvo_reg == SDVOB) { 227 if (intel_sdvo->sdvo_reg == SDVOB) {
204 cval = I915_READ(SDVOC); 228 cval = I915_READ(SDVOC);
205 } else { 229 } else {
206 bval = I915_READ(SDVOB); 230 bval = I915_READ(SDVOB);
@@ -219,33 +243,27 @@ static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
219 } 243 }
220} 244}
221 245
222static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr, 246static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
223 u8 *ch)
224{ 247{
225 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 248 u8 out_buf[2] = { addr, 0 };
226 u8 out_buf[2];
227 u8 buf[2]; 249 u8 buf[2];
228 int ret;
229
230 struct i2c_msg msgs[] = { 250 struct i2c_msg msgs[] = {
231 { 251 {
232 .addr = sdvo_priv->slave_addr >> 1, 252 .addr = intel_sdvo->slave_addr >> 1,
233 .flags = 0, 253 .flags = 0,
234 .len = 1, 254 .len = 1,
235 .buf = out_buf, 255 .buf = out_buf,
236 }, 256 },
237 { 257 {
238 .addr = sdvo_priv->slave_addr >> 1, 258 .addr = intel_sdvo->slave_addr >> 1,
239 .flags = I2C_M_RD, 259 .flags = I2C_M_RD,
240 .len = 1, 260 .len = 1,
241 .buf = buf, 261 .buf = buf,
242 } 262 }
243 }; 263 };
264 int ret;
244 265
245 out_buf[0] = addr; 266 if ((ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 2)) == 2)
246 out_buf[1] = 0;
247
248 if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
249 { 267 {
250 *ch = buf[0]; 268 *ch = buf[0];
251 return true; 269 return true;
@@ -255,35 +273,26 @@ static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
255 return false; 273 return false;
256} 274}
257 275
258static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr, 276static bool intel_sdvo_write_byte(struct intel_sdvo *intel_sdvo, int addr, u8 ch)
259 u8 ch)
260{ 277{
261 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 278 u8 out_buf[2] = { addr, ch };
262 u8 out_buf[2];
263 struct i2c_msg msgs[] = { 279 struct i2c_msg msgs[] = {
264 { 280 {
265 .addr = sdvo_priv->slave_addr >> 1, 281 .addr = intel_sdvo->slave_addr >> 1,
266 .flags = 0, 282 .flags = 0,
267 .len = 2, 283 .len = 2,
268 .buf = out_buf, 284 .buf = out_buf,
269 } 285 }
270 }; 286 };
271 287
272 out_buf[0] = addr; 288 return i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 1) == 1;
273 out_buf[1] = ch;
274
275 if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
276 {
277 return true;
278 }
279 return false;
280} 289}
281 290
282#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} 291#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
283/** Mapping of command numbers to names, for debug output */ 292/** Mapping of command numbers to names, for debug output */
284static const struct _sdvo_cmd_name { 293static const struct _sdvo_cmd_name {
285 u8 cmd; 294 u8 cmd;
286 char *name; 295 const char *name;
287} sdvo_cmd_names[] = { 296} sdvo_cmd_names[] = {
288 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), 297 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
289 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), 298 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
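
The byte helpers above are tightened into designated-initializer form: a register read is a two-message I2C transaction (a one-byte write of the register offset, then a one-byte read flagged I2C_M_RD), a write is a single two-byte message, and both now report success as bool. A userspace-flavoured sketch of the same message layout; struct i2c_msg here is a simplified mirror of the Linux one, and the transfer function is a stub:

#include <stdint.h>
#include <stdio.h>

#define I2C_M_RD 0x0001

struct i2c_msg {                  /* simplified mirror of linux/i2c.h */
        uint16_t addr;
        uint16_t flags;
        uint16_t len;
        uint8_t *buf;
};

/* stub for i2c_transfer(): assumes each read follows its address write,
 * and pretends the device answers with offset + 1 */
static int fake_transfer(struct i2c_msg *msgs, int num)
{
        for (int i = 0; i < num; i++)
                if (msgs[i].flags & I2C_M_RD)
                        msgs[i].buf[0] = msgs[i - 1].buf[0] + 1;
        return num;
}

static int read_byte(uint8_t slave, uint8_t offset, uint8_t *ch)
{
        uint8_t out_buf[2] = { offset, 0 };
        uint8_t in_buf[1];
        struct i2c_msg msgs[] = {
                { .addr = slave, .flags = 0,        .len = 1, .buf = out_buf },
                { .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = in_buf  },
        };

        if (fake_transfer(msgs, 2) != 2)
                return 0;         /* same bool-style contract as the driver */
        *ch = in_buf[0];
        return 1;
}

int main(void)
{
        uint8_t v;

        if (read_byte(0x38, 0x0a, &v))
                printf("reg 0x0a = 0x%02x\n", v);
        return 0;
}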
@@ -328,13 +337,14 @@ static const struct _sdvo_cmd_name {
328 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), 337 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
329 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), 338 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
330 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), 339 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
340
331 /* Add the op code for SDVO enhancements */ 341 /* Add the op code for SDVO enhancements */
332 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_H), 342 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
333 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_H), 343 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
334 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_H), 344 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
335 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_POSITION_V), 345 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
336 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POSITION_V), 346 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
337 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_POSITION_V), 347 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
338 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), 348 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
339 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), 349 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
340 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), 350 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
@@ -353,6 +363,27 @@ static const struct _sdvo_cmd_name {
353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), 363 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
354 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), 364 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
355 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), 365 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
366 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
367 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
368 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
369 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
370 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
371 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
372 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
373 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
374 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
375 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
376 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
377 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
378 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
379 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
380 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
381 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
382 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
383 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
384 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
385 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
386
356 /* HDMI op code */ 387 /* HDMI op code */
357 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), 388 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
358 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), 389 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
@@ -377,17 +408,15 @@ static const struct _sdvo_cmd_name {
377}; 408};
378 409
379#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB) 410#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
 380#define SDVO_NAME(dev_priv) (IS_SDVOB((dev_priv)->sdvo_reg) ? "SDVOB" : "SDVOC") 411#define SDVO_NAME(sdvo) (IS_SDVOB((sdvo)->sdvo_reg) ? "SDVOB" : "SDVOC")
381#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
382 412
383static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd, 413static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
384 void *args, int args_len) 414 const void *args, int args_len)
385{ 415{
386 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
387 int i; 416 int i;
388 417
389 DRM_DEBUG_KMS("%s: W: %02X ", 418 DRM_DEBUG_KMS("%s: W: %02X ",
390 SDVO_NAME(sdvo_priv), cmd); 419 SDVO_NAME(intel_sdvo), cmd);
391 for (i = 0; i < args_len; i++) 420 for (i = 0; i < args_len; i++)
392 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]); 421 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
393 for (; i < 8; i++) 422 for (; i < 8; i++)
@@ -403,19 +432,20 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
403 DRM_LOG_KMS("\n"); 432 DRM_LOG_KMS("\n");
404} 433}
405 434
406static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd, 435static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
407 void *args, int args_len) 436 const void *args, int args_len)
408{ 437{
409 int i; 438 int i;
410 439
411 intel_sdvo_debug_write(intel_encoder, cmd, args, args_len); 440 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
412 441
413 for (i = 0; i < args_len; i++) { 442 for (i = 0; i < args_len; i++) {
414 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i, 443 if (!intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0 - i,
415 ((u8*)args)[i]); 444 ((u8*)args)[i]))
445 return false;
416 } 446 }
417 447
418 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd); 448 return intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_OPCODE, cmd);
419} 449}
420 450
421static const char *cmd_status_names[] = { 451static const char *cmd_status_names[] = {
@@ -428,14 +458,13 @@ static const char *cmd_status_names[] = {
428 "Scaling not supported" 458 "Scaling not supported"
429}; 459};
430 460
431static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder, 461static void intel_sdvo_debug_response(struct intel_sdvo *intel_sdvo,
432 void *response, int response_len, 462 void *response, int response_len,
433 u8 status) 463 u8 status)
434{ 464{
435 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
436 int i; 465 int i;
437 466
438 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); 467 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
439 for (i = 0; i < response_len; i++) 468 for (i = 0; i < response_len; i++)
440 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]); 469 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
441 for (; i < 8; i++) 470 for (; i < 8; i++)
@@ -447,8 +476,8 @@ static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
447 DRM_LOG_KMS("\n"); 476 DRM_LOG_KMS("\n");
448} 477}
449 478
450static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder, 479static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
451 void *response, int response_len) 480 void *response, int response_len)
452{ 481{
453 int i; 482 int i;
454 u8 status; 483 u8 status;
@@ -457,24 +486,26 @@ static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
457 while (retry--) { 486 while (retry--) {
458 /* Read the command response */ 487 /* Read the command response */
459 for (i = 0; i < response_len; i++) { 488 for (i = 0; i < response_len; i++) {
460 intel_sdvo_read_byte(intel_encoder, 489 if (!intel_sdvo_read_byte(intel_sdvo,
461 SDVO_I2C_RETURN_0 + i, 490 SDVO_I2C_RETURN_0 + i,
462 &((u8 *)response)[i]); 491 &((u8 *)response)[i]))
492 return false;
463 } 493 }
464 494
465 /* read the return status */ 495 /* read the return status */
466 intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS, 496 if (!intel_sdvo_read_byte(intel_sdvo, SDVO_I2C_CMD_STATUS,
467 &status); 497 &status))
498 return false;
468 499
469 intel_sdvo_debug_response(intel_encoder, response, response_len, 500 intel_sdvo_debug_response(intel_sdvo, response, response_len,
470 status); 501 status);
471 if (status != SDVO_CMD_STATUS_PENDING) 502 if (status != SDVO_CMD_STATUS_PENDING)
472 return status; 503 break;
473 504
474 mdelay(50); 505 mdelay(50);
475 } 506 }
476 507
477 return status; 508 return status == SDVO_CMD_STATUS_SUCCESS;
478} 509}
479 510
480static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) 511static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
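
intel_sdvo_read_response() keeps its bounded retry loop but now returns bool: any failed byte read aborts immediately, a PENDING status sleeps and retries, and the final comparison against SDVO_CMD_STATUS_SUCCESS is folded into the return value. A minimal sketch of the poll-until-settled shape, with a stub device that needs a few polls:

#include <stdio.h>

enum status { PENDING, SUCCESS, FAILED };

/* stub hardware: settles on the third poll */
static enum status poll_hw(void)
{
        static int calls;
        return ++calls < 3 ? PENDING : SUCCESS;
}

static int wait_response(int retries)
{
        enum status st = FAILED;

        while (retries--) {
                st = poll_hw();
                if (st != PENDING)
                        break;
                /* the driver sleeps here (mdelay(50)) before re-polling */
        }
        return st == SUCCESS;   /* collapse the tri-state into a bool */
}

int main(void)
{
        printf("ok=%d\n", wait_response(5));
        return 0;
}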
@@ -494,37 +525,36 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
494 * another I2C transaction after issuing the DDC bus switch, it will be 525 * another I2C transaction after issuing the DDC bus switch, it will be
495 * switched to the internal SDVO register. 526 * switched to the internal SDVO register.
496 */ 527 */
497static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder, 528static void intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
498 u8 target) 529 u8 target)
499{ 530{
500 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
501 u8 out_buf[2], cmd_buf[2], ret_value[2], ret; 531 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
502 struct i2c_msg msgs[] = { 532 struct i2c_msg msgs[] = {
503 { 533 {
504 .addr = sdvo_priv->slave_addr >> 1, 534 .addr = intel_sdvo->slave_addr >> 1,
505 .flags = 0, 535 .flags = 0,
506 .len = 2, 536 .len = 2,
507 .buf = out_buf, 537 .buf = out_buf,
508 }, 538 },
509 /* the following two are to read the response */ 539 /* the following two are to read the response */
510 { 540 {
511 .addr = sdvo_priv->slave_addr >> 1, 541 .addr = intel_sdvo->slave_addr >> 1,
512 .flags = 0, 542 .flags = 0,
513 .len = 1, 543 .len = 1,
514 .buf = cmd_buf, 544 .buf = cmd_buf,
515 }, 545 },
516 { 546 {
517 .addr = sdvo_priv->slave_addr >> 1, 547 .addr = intel_sdvo->slave_addr >> 1,
518 .flags = I2C_M_RD, 548 .flags = I2C_M_RD,
519 .len = 1, 549 .len = 1,
520 .buf = ret_value, 550 .buf = ret_value,
521 }, 551 },
522 }; 552 };
523 553
524 intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH, 554 intel_sdvo_debug_write(intel_sdvo, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
525 &target, 1); 555 &target, 1);
526 /* write the DDC switch command argument */ 556 /* write the DDC switch command argument */
527 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target); 557 intel_sdvo_write_byte(intel_sdvo, SDVO_I2C_ARG_0, target);
528 558
529 out_buf[0] = SDVO_I2C_OPCODE; 559 out_buf[0] = SDVO_I2C_OPCODE;
530 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; 560 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -533,7 +563,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
533 ret_value[0] = 0; 563 ret_value[0] = 0;
534 ret_value[1] = 0; 564 ret_value[1] = 0;
535 565
536 ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3); 566 ret = i2c_transfer(intel_sdvo->base.i2c_bus, msgs, 3);
537 if (ret != 3) { 567 if (ret != 3) {
538 /* failure in I2C transfer */ 568 /* failure in I2C transfer */
539 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 569 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -547,23 +577,29 @@ static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encode
547 return; 577 return;
548} 578}
549 579
550static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1) 580static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
551{ 581{
552 struct intel_sdvo_set_target_input_args targets = {0}; 582 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
553 u8 status; 583 return false;
554
555 if (target_0 && target_1)
556 return SDVO_CMD_STATUS_NOTSUPP;
557 584
558 if (target_1) 585 return intel_sdvo_read_response(intel_sdvo, NULL, 0);
559 targets.target_1 = 1; 586}
560 587
561 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets, 588static bool
562 sizeof(targets)); 589intel_sdvo_get_value(struct intel_sdvo *intel_sdvo, u8 cmd, void *value, int len)
590{
591 if (!intel_sdvo_write_cmd(intel_sdvo, cmd, NULL, 0))
592 return false;
563 593
564 status = intel_sdvo_read_response(intel_encoder, NULL, 0); 594 return intel_sdvo_read_response(intel_sdvo, value, len);
595}
565 596
566 return (status == SDVO_CMD_STATUS_SUCCESS); 597static bool intel_sdvo_set_target_input(struct intel_sdvo *intel_sdvo)
598{
599 struct intel_sdvo_set_target_input_args targets = {0};
600 return intel_sdvo_set_value(intel_sdvo,
601 SDVO_CMD_SET_TARGET_INPUT,
602 &targets, sizeof(targets));
567} 603}
568 604
569/** 605/**
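
Nearly every SDVO command used to hand-roll the same write-command/read-response pair; intel_sdvo_set_value() and intel_sdvo_get_value() fold that pair into one call each, and callers chain them with && so the first failing I2C step short-circuits the rest (intel_sdvo_set_target_input also loses its target_0/target_1 arguments and always selects input 0). A sketch of the wrapper shape over a stubbed transport; the opcode values below are illustrative, not real SDVO opcodes:

#include <stdio.h>
#include <string.h>

/* stubbed transport: log the command and claim success */
static int write_cmd(unsigned char cmd, const void *args, int len)
{
        printf("W cmd=%02x len=%d\n", cmd, len);
        return 1;
}

static int read_response(void *resp, int len)
{
        if (resp)
                memset(resp, 0, len);  /* pretend the device returned zeros */
        return 1;
}

static int set_value(unsigned char cmd, const void *data, int len)
{
        return write_cmd(cmd, data, len) && read_response(NULL, 0);
}

static int get_value(unsigned char cmd, void *value, int len)
{
        return write_cmd(cmd, NULL, 0) && read_response(value, len);
}

int main(void)
{
        unsigned short outputs = 0x0001;
        unsigned char part1[8], part2[8];

        /* a multi-part command: a failed first step skips the rest */
        if (set_value(0x10, &outputs, sizeof(outputs)) &&
            get_value(0x20, part1, sizeof(part1)) &&
            get_value(0x21, part2, sizeof(part2)))
                printf("all steps succeeded\n");
        return 0;
}

The same chaining also fixes a latent bug visible later in this diff: the old intel_sdvo_get_preferred_input_timing() ended with return false even when both reads succeeded.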
@@ -572,14 +608,12 @@ static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, boo
572 * This function is making an assumption about the layout of the response, 608 * This function is making an assumption about the layout of the response,
573 * which should be checked against the docs. 609 * which should be checked against the docs.
574 */ 610 */
575static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2) 611static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *input_1, bool *input_2)
576{ 612{
577 struct intel_sdvo_get_trained_inputs_response response; 613 struct intel_sdvo_get_trained_inputs_response response;
578 u8 status;
579 614
580 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); 615 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
581 status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response)); 616 &response, sizeof(response)))
582 if (status != SDVO_CMD_STATUS_SUCCESS)
583 return false; 617 return false;
584 618
585 *input_1 = response.input0_trained; 619 *input_1 = response.input0_trained;
@@ -587,21 +621,18 @@ static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, b
587 return true; 621 return true;
588} 622}
589 623
590static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder, 624static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
591 u16 outputs) 625 u16 outputs)
592{ 626{
593 u8 status; 627 return intel_sdvo_set_value(intel_sdvo,
594 628 SDVO_CMD_SET_ACTIVE_OUTPUTS,
595 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, 629 &outputs, sizeof(outputs));
596 sizeof(outputs));
597 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
598 return (status == SDVO_CMD_STATUS_SUCCESS);
599} 630}
600 631
601static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder, 632static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
602 int mode) 633 int mode)
603{ 634{
604 u8 status, state = SDVO_ENCODER_STATE_ON; 635 u8 state = SDVO_ENCODER_STATE_ON;
605 636
606 switch (mode) { 637 switch (mode) {
607 case DRM_MODE_DPMS_ON: 638 case DRM_MODE_DPMS_ON:
@@ -618,88 +649,63 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encod
618 break; 649 break;
619 } 650 }
620 651
621 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 652 return intel_sdvo_set_value(intel_sdvo,
622 sizeof(state)); 653 SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
623 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
624
625 return (status == SDVO_CMD_STATUS_SUCCESS);
626} 654}
627 655
628static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder, 656static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo,
629 int *clock_min, 657 int *clock_min,
630 int *clock_max) 658 int *clock_max)
631{ 659{
632 struct intel_sdvo_pixel_clock_range clocks; 660 struct intel_sdvo_pixel_clock_range clocks;
633 u8 status;
634
635 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
636 NULL, 0);
637
638 status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
639 661
640 if (status != SDVO_CMD_STATUS_SUCCESS) 662 if (!intel_sdvo_get_value(intel_sdvo,
663 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
664 &clocks, sizeof(clocks)))
641 return false; 665 return false;
642 666
643 /* Convert the values from units of 10 kHz to kHz. */ 667 /* Convert the values from units of 10 kHz to kHz. */
644 *clock_min = clocks.min * 10; 668 *clock_min = clocks.min * 10;
645 *clock_max = clocks.max * 10; 669 *clock_max = clocks.max * 10;
646
647 return true; 670 return true;
648} 671}
649 672
650static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder, 673static bool intel_sdvo_set_target_output(struct intel_sdvo *intel_sdvo,
651 u16 outputs) 674 u16 outputs)
652{ 675{
653 u8 status; 676 return intel_sdvo_set_value(intel_sdvo,
654 677 SDVO_CMD_SET_TARGET_OUTPUT,
655 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs, 678 &outputs, sizeof(outputs));
656 sizeof(outputs));
657
658 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
659 return (status == SDVO_CMD_STATUS_SUCCESS);
660} 679}
661 680
662static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd, 681static bool intel_sdvo_set_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
663 struct intel_sdvo_dtd *dtd) 682 struct intel_sdvo_dtd *dtd)
664{ 683{
665 u8 status; 684 return intel_sdvo_set_value(intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
666 685 intel_sdvo_set_value(intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
667 intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
668 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
669 if (status != SDVO_CMD_STATUS_SUCCESS)
670 return false;
671
672 intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
673 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
674 if (status != SDVO_CMD_STATUS_SUCCESS)
675 return false;
676
677 return true;
678} 686}
679 687
680static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder, 688static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
681 struct intel_sdvo_dtd *dtd) 689 struct intel_sdvo_dtd *dtd)
682{ 690{
683 return intel_sdvo_set_timing(intel_encoder, 691 return intel_sdvo_set_timing(intel_sdvo,
684 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd); 692 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
685} 693}
686 694
687static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder, 695static bool intel_sdvo_set_output_timing(struct intel_sdvo *intel_sdvo,
688 struct intel_sdvo_dtd *dtd) 696 struct intel_sdvo_dtd *dtd)
689{ 697{
690 return intel_sdvo_set_timing(intel_encoder, 698 return intel_sdvo_set_timing(intel_sdvo,
691 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 699 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
692} 700}
693 701
694static bool 702static bool
695intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder, 703intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
696 uint16_t clock, 704 uint16_t clock,
697 uint16_t width, 705 uint16_t width,
698 uint16_t height) 706 uint16_t height)
699{ 707{
700 struct intel_sdvo_preferred_input_timing_args args; 708 struct intel_sdvo_preferred_input_timing_args args;
701 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
702 uint8_t status;
703 709
704 memset(&args, 0, sizeof(args)); 710 memset(&args, 0, sizeof(args));
705 args.clock = clock; 711 args.clock = clock;
@@ -707,59 +713,32 @@ intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
707 args.height = height; 713 args.height = height;
708 args.interlace = 0; 714 args.interlace = 0;
709 715
710 if (sdvo_priv->is_lvds && 716 if (intel_sdvo->is_lvds &&
711 (sdvo_priv->sdvo_lvds_fixed_mode->hdisplay != width || 717 (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
712 sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height)) 718 intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
713 args.scaled = 1; 719 args.scaled = 1;
714 720
715 intel_sdvo_write_cmd(intel_encoder, 721 return intel_sdvo_set_value(intel_sdvo,
716 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, 722 SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
717 &args, sizeof(args)); 723 &args, sizeof(args));
718 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
719 if (status != SDVO_CMD_STATUS_SUCCESS)
720 return false;
721
722 return true;
723} 724}
724 725
725static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder, 726static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
726 struct intel_sdvo_dtd *dtd) 727 struct intel_sdvo_dtd *dtd)
727{ 728{
728 bool status; 729 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
729 730 &dtd->part1, sizeof(dtd->part1)) &&
730 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1, 731 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
731 NULL, 0); 732 &dtd->part2, sizeof(dtd->part2));
732
733 status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
734 sizeof(dtd->part1));
735 if (status != SDVO_CMD_STATUS_SUCCESS)
736 return false;
737
738 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
739 NULL, 0);
740
741 status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
742 sizeof(dtd->part2));
743 if (status != SDVO_CMD_STATUS_SUCCESS)
744 return false;
745
746 return false;
747} 733}
748 734
749static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val) 735static bool intel_sdvo_set_clock_rate_mult(struct intel_sdvo *intel_sdvo, u8 val)
750{ 736{
751 u8 status; 737 return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
752
753 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
754 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
755 if (status != SDVO_CMD_STATUS_SUCCESS)
756 return false;
757
758 return true;
759} 738}
760 739
761static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, 740static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
762 struct drm_display_mode *mode) 741 const struct drm_display_mode *mode)
763{ 742{
764 uint16_t width, height; 743 uint16_t width, height;
765 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; 744 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
@@ -808,7 +787,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
808} 787}
809 788
810static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, 789static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
811 struct intel_sdvo_dtd *dtd) 790 const struct intel_sdvo_dtd *dtd)
812{ 791{
813 mode->hdisplay = dtd->part1.h_active; 792 mode->hdisplay = dtd->part1.h_active;
814 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8; 793 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
@@ -840,45 +819,33 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
840 mode->flags |= DRM_MODE_FLAG_PVSYNC; 819 mode->flags |= DRM_MODE_FLAG_PVSYNC;
841} 820}
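The unpacking above splits each 12-bit timing value across a low byte and one nibble of a shared high byte; a self-contained sketch of the hdisplay case (struct and values illustrative, field names follow the driver):

#include <stdint.h>
#include <stdio.h>

struct dtd_h {
	uint8_t h_active;	/* low 8 bits of hdisplay */
	uint8_t h_high;		/* bits 11:8 of hdisplay in the top nibble */
};

static uint16_t dtd_hdisplay(const struct dtd_h *p)
{
	return p->h_active | (((p->h_high >> 4) & 0x0f) << 8);
}

int main(void)
{
	struct dtd_h p = { .h_active = 1920 & 0xff, .h_high = (1920 >> 8) << 4 };
	printf("%u\n", dtd_hdisplay(&p));	/* prints 1920 */
	return 0;
}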
842 821
843static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder, 822static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo,
844 struct intel_sdvo_encode *encode) 823 struct intel_sdvo_encode *encode)
845{ 824{
846 uint8_t status; 825 if (intel_sdvo_get_value(intel_sdvo,
847 826 SDVO_CMD_GET_SUPP_ENCODE,
848 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0); 827 encode, sizeof(*encode)))
849 status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode)); 828 return true;
850 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
851 memset(encode, 0, sizeof(*encode));
852 return false;
853 }
854 829
855 return true; 830 /* non-support means DVI */
831 memset(encode, 0, sizeof(*encode));
832 return false;
856} 833}
857 834
858static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder, 835static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
859 uint8_t mode) 836 uint8_t mode)
860{ 837{
861 uint8_t status; 838 return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
862
863 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
864 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
865
866 return (status == SDVO_CMD_STATUS_SUCCESS);
867} 839}
868 840
869static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder, 841static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
870 uint8_t mode) 842 uint8_t mode)
871{ 843{
872 uint8_t status; 844 return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
873
874 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
875 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
876
877 return (status == SDVO_CMD_STATUS_SUCCESS);
878} 845}
879 846
880#if 0 847#if 0
881static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder) 848static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
882{ 849{
883 int i, j; 850 int i, j;
884 uint8_t set_buf_index[2]; 851 uint8_t set_buf_index[2];
@@ -887,8 +854,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
887 uint8_t buf[48]; 854 uint8_t buf[48];
888 uint8_t *pos; 855 uint8_t *pos;
889 856
890 intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0); 857 intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
891 intel_sdvo_read_response(encoder, &av_split, 1);
892 858
893 for (i = 0; i <= av_split; i++) { 859 for (i = 0; i <= av_split; i++) {
894 set_buf_index[0] = i; set_buf_index[1] = 0; 860 set_buf_index[0] = i; set_buf_index[1] = 0;
@@ -908,7 +874,7 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
908} 874}
909#endif 875#endif
910 876
911static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder, 877static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo,
912 int index, 878 int index,
913 uint8_t *data, int8_t size, uint8_t tx_rate) 879 uint8_t *data, int8_t size, uint8_t tx_rate)
914{ 880{
@@ -917,15 +883,18 @@ static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
917 set_buf_index[0] = index; 883 set_buf_index[0] = index;
918 set_buf_index[1] = 0; 884 set_buf_index[1] = 0;
919 885
920 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX, 886 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
921 set_buf_index, 2); 887 set_buf_index, 2))
888 return false;
922 889
923 for (; size > 0; size -= 8) { 890 for (; size > 0; size -= 8) {
924 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8); 891 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8))
892 return false;
893
925 data += 8; 894 data += 8;
926 } 895 }
927 896
928 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); 897 return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
929} 898}
930 899
931static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) 900static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
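The body of intel_sdvo_calc_hbuf_csum() is not shown in this hunk; if it follows the usual HDMI infoframe rule, the checksum byte is chosen so that the packet's byte sum wraps to zero modulo 256, roughly:

#include <stdint.h>
#include <stddef.h>

/* Assumed checksum rule, not the driver's verbatim body. */
static uint8_t hbuf_csum(const uint8_t *data, size_t size)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return 0x100 - sum;	/* makes the total byte sum wrap to zero */
}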
@@ -1000,7 +969,7 @@ struct dip_infoframe {
1000 } __attribute__ ((packed)) u; 969 } __attribute__ ((packed)) u;
1001} __attribute__((packed)); 970} __attribute__((packed));
1002 971
1003static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder, 972static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
1004 struct drm_display_mode * mode) 973 struct drm_display_mode * mode)
1005{ 974{
1006 struct dip_infoframe avi_if = { 975 struct dip_infoframe avi_if = {
@@ -1011,133 +980,107 @@ static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
1011 980
1012 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if, 981 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
1013 4 + avi_if.len); 982 4 + avi_if.len);
1014 intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if, 983 return intel_sdvo_set_hdmi_buf(intel_sdvo, 1, (uint8_t *)&avi_if,
1015 4 + avi_if.len, 984 4 + avi_if.len,
1016 SDVO_HBUF_TX_VSYNC); 985 SDVO_HBUF_TX_VSYNC);
1017} 986}
1018 987
1019static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder) 988static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
1020{ 989{
1021
1022 struct intel_sdvo_tv_format format; 990 struct intel_sdvo_tv_format format;
1023 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 991 uint32_t format_map;
1024 uint32_t format_map, i;
1025 uint8_t status;
1026
1027 for (i = 0; i < TV_FORMAT_NUM; i++)
1028 if (tv_format_names[i] == sdvo_priv->tv_format_name)
1029 break;
1030 992
1031 format_map = 1 << i; 993 format_map = 1 << intel_sdvo->tv_format_index;
1032 memset(&format, 0, sizeof(format)); 994 memset(&format, 0, sizeof(format));
1033 memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ? 995 memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
1034 sizeof(format) : sizeof(format_map));
1035 996
1036 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format, 997 BUILD_BUG_ON(sizeof(format) != 6);
1037 sizeof(format)); 998 return intel_sdvo_set_value(intel_sdvo,
1038 999 SDVO_CMD_SET_TV_FORMAT,
1039 status = intel_sdvo_read_response(intel_encoder, NULL, 0); 1000 &format, sizeof(format));
1040 if (status != SDVO_CMD_STATUS_SUCCESS)
1041 DRM_DEBUG_KMS("%s: Failed to set TV format\n",
1042 SDVO_NAME(sdvo_priv));
1043} 1001}
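The BUILD_BUG_ON() added above is the stock kernel compile-time assertion; a self-contained sketch of the trick behind it:

/* The array size goes negative -- a compile error -- when cond is
 * true, so a size mismatch is caught at build time, not at runtime:
 *   BUILD_BUG_ON_SKETCH(sizeof(struct intel_sdvo_tv_format) != 6);
 */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))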
1044 1002
1045static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, 1003static bool
1046 struct drm_display_mode *mode, 1004intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
1047 struct drm_display_mode *adjusted_mode) 1005 struct drm_display_mode *mode)
1048{ 1006{
1049 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1007 struct intel_sdvo_dtd output_dtd;
1050 struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
1051 1008
1052 if (dev_priv->is_tv) { 1009 if (!intel_sdvo_set_target_output(intel_sdvo,
1053 struct intel_sdvo_dtd output_dtd; 1010 intel_sdvo->attached_output))
1054 bool success; 1011 return false;
1055 1012
1056 /* We need to construct preferred input timings based on our 1013 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
1057 * output timings. To do that, we have to set the output 1014 if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
1058 * timings, even though this isn't really the right place in 1015 return false;
1059 * the sequence to do it. Oh well.
1060 */
1061 1016
1017 return true;
1018}
1062 1019
1063 /* Set output timings */ 1020static bool
1064 intel_sdvo_get_dtd_from_mode(&output_dtd, mode); 1021intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
1065 intel_sdvo_set_target_output(intel_encoder, 1022 struct drm_display_mode *mode,
1066 dev_priv->attached_output); 1023 struct drm_display_mode *adjusted_mode)
1067 intel_sdvo_set_output_timing(intel_encoder, &output_dtd); 1024{
1025 struct intel_sdvo_dtd input_dtd;
1068 1026
1069 /* Set the input timing to the screen. Assume always input 0. */ 1027 /* Reset the input timing to the screen. Assume always input 0. */
1070 intel_sdvo_set_target_input(intel_encoder, true, false); 1028 if (!intel_sdvo_set_target_input(intel_sdvo))
1029 return false;
1071 1030
1031 if (!intel_sdvo_create_preferred_input_timing(intel_sdvo,
1032 mode->clock / 10,
1033 mode->hdisplay,
1034 mode->vdisplay))
1035 return false;
1072 1036
1073 success = intel_sdvo_create_preferred_input_timing(intel_encoder, 1037 if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
1074 mode->clock / 10, 1038 &input_dtd))
1075 mode->hdisplay, 1039 return false;
1076 mode->vdisplay);
1077 if (success) {
1078 struct intel_sdvo_dtd input_dtd;
1079 1040
1080 intel_sdvo_get_preferred_input_timing(intel_encoder, 1041 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1081 &input_dtd); 1042 intel_sdvo->sdvo_flags = input_dtd.part2.sdvo_flags;
1082 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1083 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
1084 1043
1085 drm_mode_set_crtcinfo(adjusted_mode, 0); 1044 drm_mode_set_crtcinfo(adjusted_mode, 0);
1045 mode->clock = adjusted_mode->clock;
1046 return true;
1047}
1086 1048
1087 mode->clock = adjusted_mode->clock; 1049static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1050 struct drm_display_mode *mode,
1051 struct drm_display_mode *adjusted_mode)
1052{
1053 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1088 1054
1089 adjusted_mode->clock *= 1055 /* We need to construct preferred input timings based on our
1090 intel_sdvo_get_pixel_multiplier(mode); 1056 * output timings. To do that, we have to set the output
1091 } else { 1057 * timings, even though this isn't really the right place in
1058 * the sequence to do it. Oh well.
1059 */
1060 if (intel_sdvo->is_tv) {
1061 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
1092 return false; 1062 return false;
1093 }
1094 } else if (dev_priv->is_lvds) {
1095 struct intel_sdvo_dtd output_dtd;
1096 bool success;
1097
1098 drm_mode_set_crtcinfo(dev_priv->sdvo_lvds_fixed_mode, 0);
1099 /* Set output timings */
1100 intel_sdvo_get_dtd_from_mode(&output_dtd,
1101 dev_priv->sdvo_lvds_fixed_mode);
1102
1103 intel_sdvo_set_target_output(intel_encoder,
1104 dev_priv->attached_output);
1105 intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
1106
1107 /* Set the input timing to the screen. Assume always input 0. */
1108 intel_sdvo_set_target_input(intel_encoder, true, false);
1109
1110
1111 success = intel_sdvo_create_preferred_input_timing(
1112 intel_encoder,
1113 mode->clock / 10,
1114 mode->hdisplay,
1115 mode->vdisplay);
1116
1117 if (success) {
1118 struct intel_sdvo_dtd input_dtd;
1119
1120 intel_sdvo_get_preferred_input_timing(intel_encoder,
1121 &input_dtd);
1122 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
1123 dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
1124 1063
1125 drm_mode_set_crtcinfo(adjusted_mode, 0); 1064 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1065 mode,
1066 adjusted_mode);
1067 } else if (intel_sdvo->is_lvds) {
1068 drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode, 0);
1126 1069
1127 mode->clock = adjusted_mode->clock; 1070 if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
1128 1071 intel_sdvo->sdvo_lvds_fixed_mode))
1129 adjusted_mode->clock *=
1130 intel_sdvo_get_pixel_multiplier(mode);
1131 } else {
1132 return false; 1072 return false;
1133 }
1134 1073
1135 } else { 1074 (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
1136 /* Make the CRTC code factor in the SDVO pixel multiplier. The 1075 mode,
1137 * SDVO device will be told of the multiplier during mode_set. 1076 adjusted_mode);
1138 */
1139 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
1140 } 1077 }
1078
1079 /* Make the CRTC code factor in the SDVO pixel multiplier. The
1080 * SDVO device will be told of the multiplier during mode_set.
1081 */
1082 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
1083
1141 return true; 1084 return true;
1142} 1085}
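The multiplier applied above keeps the SDVO link clock in its working range by sending each pixel more than once for slow modes; intel_sdvo_get_pixel_multiplier() is expected to implement something like the following (the thresholds are an assumption, not quoted from this hunk):

static int sdvo_pixel_multiplier(int clock_khz)
{
	if (clock_khz >= 100000)
		return 1;	/* fast modes pass through at 1x */
	if (clock_khz >= 50000)
		return 2;	/* 50-100 MHz modes are doubled */
	return 4;		/* everything slower is quadrupled */
}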
1143 1086
@@ -1149,13 +1092,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1149 struct drm_i915_private *dev_priv = dev->dev_private; 1092 struct drm_i915_private *dev_priv = dev->dev_private;
1150 struct drm_crtc *crtc = encoder->crtc; 1093 struct drm_crtc *crtc = encoder->crtc;
1151 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1152 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1095 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1153 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1154 u32 sdvox = 0; 1096 u32 sdvox = 0;
1155 int sdvo_pixel_multiply; 1097 int sdvo_pixel_multiply, rate;
1156 struct intel_sdvo_in_out_map in_out; 1098 struct intel_sdvo_in_out_map in_out;
1157 struct intel_sdvo_dtd input_dtd; 1099 struct intel_sdvo_dtd input_dtd;
1158 u8 status;
1159 1100
1160 if (!mode) 1101 if (!mode)
1161 return; 1102 return;
@@ -1166,41 +1107,46 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1166 * channel on the motherboard. In a two-input device, the first input 1107 * channel on the motherboard. In a two-input device, the first input
1167 * will be SDVOB and the second SDVOC. 1108 * will be SDVOB and the second SDVOC.
1168 */ 1109 */
1169 in_out.in0 = sdvo_priv->attached_output; 1110 in_out.in0 = intel_sdvo->attached_output;
1170 in_out.in1 = 0; 1111 in_out.in1 = 0;
1171 1112
1172 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP, 1113 intel_sdvo_set_value(intel_sdvo,
1114 SDVO_CMD_SET_IN_OUT_MAP,
1173 &in_out, sizeof(in_out)); 1115 &in_out, sizeof(in_out));
1174 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
1175 1116
1176 if (sdvo_priv->is_hdmi) { 1117 if (intel_sdvo->is_hdmi) {
1177 intel_sdvo_set_avi_infoframe(intel_encoder, mode); 1118 if (!intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
1119 return;
1120
1178 sdvox |= SDVO_AUDIO_ENABLE; 1121 sdvox |= SDVO_AUDIO_ENABLE;
1179 } 1122 }
1180 1123
 1181 /* We have tried to get the input timing in mode_fixup, and filled it 1124 /* We have tried to get the input timing in mode_fixup, and filled it
 1182 into adjusted_mode */ 1125 into adjusted_mode */
1183 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 1126 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
1184 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1127 if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
1185 input_dtd.part2.sdvo_flags = sdvo_priv->sdvo_flags; 1128 input_dtd.part2.sdvo_flags = intel_sdvo->sdvo_flags;
1186 } else
1187 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1188 1129
1189 /* If it's a TV, we already set the output timing in mode_fixup. 1130 /* If it's a TV, we already set the output timing in mode_fixup.
1190 * Otherwise, the output timing is equal to the input timing. 1131 * Otherwise, the output timing is equal to the input timing.
1191 */ 1132 */
1192 if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) { 1133 if (!intel_sdvo->is_tv && !intel_sdvo->is_lvds) {
1193 /* Set the output timing to the screen */ 1134 /* Set the output timing to the screen */
1194 intel_sdvo_set_target_output(intel_encoder, 1135 if (!intel_sdvo_set_target_output(intel_sdvo,
1195 sdvo_priv->attached_output); 1136 intel_sdvo->attached_output))
1196 intel_sdvo_set_output_timing(intel_encoder, &input_dtd); 1137 return;
1138
1139 (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
1197 } 1140 }
1198 1141
1199 /* Set the input timing to the screen. Assume always input 0. */ 1142 /* Set the input timing to the screen. Assume always input 0. */
1200 intel_sdvo_set_target_input(intel_encoder, true, false); 1143 if (!intel_sdvo_set_target_input(intel_sdvo))
1144 return;
1201 1145
1202 if (sdvo_priv->is_tv) 1146 if (intel_sdvo->is_tv) {
1203 intel_sdvo_set_tv_format(intel_encoder); 1147 if (!intel_sdvo_set_tv_format(intel_sdvo))
1148 return;
1149 }
1204 1150
1205 /* We would like to use intel_sdvo_create_preferred_input_timing() to 1151 /* We would like to use intel_sdvo_create_preferred_input_timing() to
1206 * provide the device with a timing it can support, if it supports that 1152 * provide the device with a timing it can support, if it supports that
@@ -1217,23 +1163,17 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1217 intel_sdvo_set_input_timing(encoder, &input_dtd); 1163 intel_sdvo_set_input_timing(encoder, &input_dtd);
1218 } 1164 }
1219#else 1165#else
1220 intel_sdvo_set_input_timing(intel_encoder, &input_dtd); 1166 (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
1221#endif 1167#endif
1222 1168
1223 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1169 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
1224 case 1: 1170 switch (sdvo_pixel_multiply) {
1225 intel_sdvo_set_clock_rate_mult(intel_encoder, 1171 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
1226 SDVO_CLOCK_RATE_MULT_1X); 1172 case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
1227 break; 1173 case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
1228 case 2:
1229 intel_sdvo_set_clock_rate_mult(intel_encoder,
1230 SDVO_CLOCK_RATE_MULT_2X);
1231 break;
1232 case 4:
1233 intel_sdvo_set_clock_rate_mult(intel_encoder,
1234 SDVO_CLOCK_RATE_MULT_4X);
1235 break;
1236 } 1174 }
1175 if (!intel_sdvo_set_clock_rate_mult(intel_sdvo, rate))
1176 return;
1237 1177
1238 /* Set the SDVO control regs. */ 1178 /* Set the SDVO control regs. */
1239 if (IS_I965G(dev)) { 1179 if (IS_I965G(dev)) {
@@ -1243,8 +1183,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1243 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1183 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1244 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 1184 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
1245 } else { 1185 } else {
1246 sdvox |= I915_READ(sdvo_priv->sdvo_reg); 1186 sdvox |= I915_READ(intel_sdvo->sdvo_reg);
1247 switch (sdvo_priv->sdvo_reg) { 1187 switch (intel_sdvo->sdvo_reg) {
1248 case SDVOB: 1188 case SDVOB:
1249 sdvox &= SDVOB_PRESERVE_MASK; 1189 sdvox &= SDVOB_PRESERVE_MASK;
1250 break; 1190 break;
@@ -1257,7 +1197,6 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1257 if (intel_crtc->pipe == 1) 1197 if (intel_crtc->pipe == 1)
1258 sdvox |= SDVO_PIPE_B_SELECT; 1198 sdvox |= SDVO_PIPE_B_SELECT;
1259 1199
1260 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
1261 if (IS_I965G(dev)) { 1200 if (IS_I965G(dev)) {
1262 /* done in crtc_mode_set as the dpll_md reg must be written early */ 1201 /* done in crtc_mode_set as the dpll_md reg must be written early */
1263 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1202 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
@@ -1266,28 +1205,28 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1266 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1205 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1267 } 1206 }
1268 1207
1269 if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL) 1208 if (intel_sdvo->sdvo_flags & SDVO_NEED_TO_STALL)
1270 sdvox |= SDVO_STALL_SELECT; 1209 sdvox |= SDVO_STALL_SELECT;
1271 intel_sdvo_write_sdvox(intel_encoder, sdvox); 1210 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1272} 1211}
1273 1212
1274static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1213static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1275{ 1214{
1276 struct drm_device *dev = encoder->dev; 1215 struct drm_device *dev = encoder->dev;
1277 struct drm_i915_private *dev_priv = dev->dev_private; 1216 struct drm_i915_private *dev_priv = dev->dev_private;
1278 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1217 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1279 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 1218 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1280 u32 temp; 1219 u32 temp;
1281 1220
1282 if (mode != DRM_MODE_DPMS_ON) { 1221 if (mode != DRM_MODE_DPMS_ON) {
1283 intel_sdvo_set_active_outputs(intel_encoder, 0); 1222 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1284 if (0) 1223 if (0)
1285 intel_sdvo_set_encoder_power_state(intel_encoder, mode); 1224 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1286 1225
1287 if (mode == DRM_MODE_DPMS_OFF) { 1226 if (mode == DRM_MODE_DPMS_OFF) {
1288 temp = I915_READ(sdvo_priv->sdvo_reg); 1227 temp = I915_READ(intel_sdvo->sdvo_reg);
1289 if ((temp & SDVO_ENABLE) != 0) { 1228 if ((temp & SDVO_ENABLE) != 0) {
1290 intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE); 1229 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1291 } 1230 }
1292 } 1231 }
1293 } else { 1232 } else {
@@ -1295,28 +1234,25 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
1295 int i; 1234 int i;
1296 u8 status; 1235 u8 status;
1297 1236
1298 temp = I915_READ(sdvo_priv->sdvo_reg); 1237 temp = I915_READ(intel_sdvo->sdvo_reg);
1299 if ((temp & SDVO_ENABLE) == 0) 1238 if ((temp & SDVO_ENABLE) == 0)
1300 intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE); 1239 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1301 for (i = 0; i < 2; i++) 1240 for (i = 0; i < 2; i++)
1302 intel_wait_for_vblank(dev); 1241 intel_wait_for_vblank(dev, intel_crtc->pipe);
1303
1304 status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
1305 &input2);
1306
1307 1242
1243 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1308 /* Warn if the device reported failure to sync. 1244 /* Warn if the device reported failure to sync.
1309 * A lot of SDVO devices fail to notify of sync, but it's 1245 * A lot of SDVO devices fail to notify of sync, but it's
1310 * a given it the status is a success, we succeeded. 1246 * a given it the status is a success, we succeeded.
1311 */ 1247 */
1312 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) { 1248 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1313 DRM_DEBUG_KMS("First %s output reported failure to " 1249 DRM_DEBUG_KMS("First %s output reported failure to "
1314 "sync\n", SDVO_NAME(sdvo_priv)); 1250 "sync\n", SDVO_NAME(intel_sdvo));
1315 } 1251 }
1316 1252
1317 if (0) 1253 if (0)
1318 intel_sdvo_set_encoder_power_state(intel_encoder, mode); 1254 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1319 intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->attached_output); 1255 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1320 } 1256 }
1321 return; 1257 return;
1322} 1258}
@@ -1325,42 +1261,31 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
1325 struct drm_display_mode *mode) 1261 struct drm_display_mode *mode)
1326{ 1262{
1327 struct drm_encoder *encoder = intel_attached_encoder(connector); 1263 struct drm_encoder *encoder = intel_attached_encoder(connector);
1328 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1264 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1329 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1330 1265
1331 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1266 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
1332 return MODE_NO_DBLESCAN; 1267 return MODE_NO_DBLESCAN;
1333 1268
1334 if (sdvo_priv->pixel_clock_min > mode->clock) 1269 if (intel_sdvo->pixel_clock_min > mode->clock)
1335 return MODE_CLOCK_LOW; 1270 return MODE_CLOCK_LOW;
1336 1271
1337 if (sdvo_priv->pixel_clock_max < mode->clock) 1272 if (intel_sdvo->pixel_clock_max < mode->clock)
1338 return MODE_CLOCK_HIGH; 1273 return MODE_CLOCK_HIGH;
1339 1274
1340 if (sdvo_priv->is_lvds == true) { 1275 if (intel_sdvo->is_lvds) {
1341 if (sdvo_priv->sdvo_lvds_fixed_mode == NULL) 1276 if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
1342 return MODE_PANEL; 1277 return MODE_PANEL;
1343 1278
1344 if (mode->hdisplay > sdvo_priv->sdvo_lvds_fixed_mode->hdisplay) 1279 if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
1345 return MODE_PANEL;
1346
1347 if (mode->vdisplay > sdvo_priv->sdvo_lvds_fixed_mode->vdisplay)
1348 return MODE_PANEL; 1280 return MODE_PANEL;
1349 } 1281 }
1350 1282
1351 return MODE_OK; 1283 return MODE_OK;
1352} 1284}
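The checks above amount to a clamp against the encoder's pixel-clock range plus, for LVDS, the fixed panel size; a self-contained sketch with invented limits (the real pixel_clock_min/max come from the device capability block):

#include <stdbool.h>

static bool mode_fits(int clock_khz, int hdisplay, int vdisplay)
{
	const int clock_min = 25000, clock_max = 165000;	/* kHz */
	const int panel_h = 1400, panel_v = 1050;		/* LVDS panel */

	return clock_khz >= clock_min && clock_khz <= clock_max &&
	       hdisplay <= panel_h && vdisplay <= panel_v;
}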
1353 1285
1354static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps) 1286static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
1355{ 1287{
1356 u8 status; 1288 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DEVICE_CAPS, caps, sizeof(*caps));
1357
1358 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
1359 status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
1360 if (status != SDVO_CMD_STATUS_SUCCESS)
1361 return false;
1362
1363 return true;
1364} 1289}
1365 1290
1366/* No use! */ 1291/* No use! */
@@ -1368,12 +1293,12 @@ static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, str
1368struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) 1293struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
1369{ 1294{
1370 struct drm_connector *connector = NULL; 1295 struct drm_connector *connector = NULL;
1371 struct intel_encoder *iout = NULL; 1296 struct intel_sdvo *iout = NULL;
1372 struct intel_sdvo_priv *sdvo; 1297 struct intel_sdvo *sdvo;
1373 1298
1374 /* find the sdvo connector */ 1299 /* find the sdvo connector */
1375 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1300 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1376 iout = to_intel_encoder(connector); 1301 iout = to_intel_sdvo(connector);
1377 1302
1378 if (iout->type != INTEL_OUTPUT_SDVO) 1303 if (iout->type != INTEL_OUTPUT_SDVO)
1379 continue; 1304 continue;
@@ -1395,75 +1320,69 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
1395{ 1320{
1396 u8 response[2]; 1321 u8 response[2];
1397 u8 status; 1322 u8 status;
1398 struct intel_encoder *intel_encoder; 1323 struct intel_sdvo *intel_sdvo;
1399 DRM_DEBUG_KMS("\n"); 1324 DRM_DEBUG_KMS("\n");
1400 1325
1401 if (!connector) 1326 if (!connector)
1402 return 0; 1327 return 0;
1403 1328
1404 intel_encoder = to_intel_encoder(connector); 1329 intel_sdvo = to_intel_sdvo(connector);
1405
1406 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1407 status = intel_sdvo_read_response(intel_encoder, &response, 2);
1408
1409 if (response[0] !=0)
1410 return 1;
1411 1330
1412 return 0; 1331 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1332 &response, 2) && response[0];
1413} 1333}
1414 1334
1415void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) 1335void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
1416{ 1336{
1417 u8 response[2]; 1337 u8 response[2];
1418 u8 status; 1338 u8 status;
1419 struct intel_encoder *intel_encoder = to_intel_encoder(connector); 1339 struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
1420 1340
1421 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1341 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1422 intel_sdvo_read_response(intel_encoder, &response, 2); 1342 intel_sdvo_read_response(intel_sdvo, &response, 2);
1423 1343
1424 if (on) { 1344 if (on) {
1425 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); 1345 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
1426 status = intel_sdvo_read_response(intel_encoder, &response, 2); 1346 status = intel_sdvo_read_response(intel_sdvo, &response, 2);
1427 1347
1428 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1348 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1429 } else { 1349 } else {
1430 response[0] = 0; 1350 response[0] = 0;
1431 response[1] = 0; 1351 response[1] = 0;
1432 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); 1352 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
1433 } 1353 }
1434 1354
1435 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); 1355 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
1436 intel_sdvo_read_response(intel_encoder, &response, 2); 1356 intel_sdvo_read_response(intel_sdvo, &response, 2);
1437} 1357}
1438#endif 1358#endif
1439 1359
1440static bool 1360static bool
1441intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder) 1361intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
1442{ 1362{
1443 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1444 int caps = 0; 1363 int caps = 0;
1445 1364
1446 if (sdvo_priv->caps.output_flags & 1365 if (intel_sdvo->caps.output_flags &
1447 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) 1366 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
1448 caps++; 1367 caps++;
1449 if (sdvo_priv->caps.output_flags & 1368 if (intel_sdvo->caps.output_flags &
1450 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) 1369 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
1451 caps++; 1370 caps++;
1452 if (sdvo_priv->caps.output_flags & 1371 if (intel_sdvo->caps.output_flags &
1453 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1)) 1372 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
1454 caps++; 1373 caps++;
1455 if (sdvo_priv->caps.output_flags & 1374 if (intel_sdvo->caps.output_flags &
1456 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) 1375 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
1457 caps++; 1376 caps++;
1458 if (sdvo_priv->caps.output_flags & 1377 if (intel_sdvo->caps.output_flags &
1459 (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1)) 1378 (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
1460 caps++; 1379 caps++;
1461 1380
1462 if (sdvo_priv->caps.output_flags & 1381 if (intel_sdvo->caps.output_flags &
1463 (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1)) 1382 (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
1464 caps++; 1383 caps++;
1465 1384
1466 if (sdvo_priv->caps.output_flags & 1385 if (intel_sdvo->caps.output_flags &
1467 (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)) 1386 (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
1468 caps++; 1387 caps++;
1469 1388
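If the output_flags layout pairs each type's "0" bit in the low byte with its "1" bit in the high byte (an assumption about the register layout, though consistent with the pairs tested above), the whole chain of caps++ checks reduces to folding the two bytes together and counting set bits:

#include <stdint.h>

static int count_output_categories(uint16_t output_flags)
{
	uint8_t pairs = (output_flags | (output_flags >> 8)) & 0xff;
	int caps = 0;

	while (pairs) {
		pairs &= pairs - 1;	/* clear the lowest set bit */
		caps++;
	}

	return caps;
}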
@@ -1475,11 +1394,11 @@ intel_find_analog_connector(struct drm_device *dev)
1475{ 1394{
1476 struct drm_connector *connector; 1395 struct drm_connector *connector;
1477 struct drm_encoder *encoder; 1396 struct drm_encoder *encoder;
1478 struct intel_encoder *intel_encoder; 1397 struct intel_sdvo *intel_sdvo;
1479 1398
1480 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1399 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1481 intel_encoder = enc_to_intel_encoder(encoder); 1400 intel_sdvo = enc_to_intel_sdvo(encoder);
1482 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) { 1401 if (intel_sdvo->base.type == INTEL_OUTPUT_ANALOG) {
1483 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1402 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1484 if (encoder == intel_attached_encoder(connector)) 1403 if (encoder == intel_attached_encoder(connector))
1485 return connector; 1404 return connector;
@@ -1493,8 +1412,8 @@ static int
1493intel_analog_is_connected(struct drm_device *dev) 1412intel_analog_is_connected(struct drm_device *dev)
1494{ 1413{
1495 struct drm_connector *analog_connector; 1414 struct drm_connector *analog_connector;
1496 analog_connector = intel_find_analog_connector(dev);
1497 1415
1416 analog_connector = intel_find_analog_connector(dev);
1498 if (!analog_connector) 1417 if (!analog_connector)
1499 return false; 1418 return false;
1500 1419
@@ -1509,54 +1428,52 @@ enum drm_connector_status
1509intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) 1428intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1510{ 1429{
1511 struct drm_encoder *encoder = intel_attached_encoder(connector); 1430 struct drm_encoder *encoder = intel_attached_encoder(connector);
1512 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1431 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1513 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 1432 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1514 struct intel_connector *intel_connector = to_intel_connector(connector);
1515 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
1516 enum drm_connector_status status = connector_status_connected; 1433 enum drm_connector_status status = connector_status_connected;
1517 struct edid *edid = NULL; 1434 struct edid *edid = NULL;
1518 1435
1519 edid = drm_get_edid(connector, intel_encoder->ddc_bus); 1436 edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
1520 1437
 1521 /* This only applies to SDVO cards with multiple outputs */ 1438 /* This only applies to SDVO cards with multiple outputs */
1522 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) { 1439 if (edid == NULL && intel_sdvo_multifunc_encoder(intel_sdvo)) {
1523 uint8_t saved_ddc, temp_ddc; 1440 uint8_t saved_ddc, temp_ddc;
1524 saved_ddc = sdvo_priv->ddc_bus; 1441 saved_ddc = intel_sdvo->ddc_bus;
1525 temp_ddc = sdvo_priv->ddc_bus >> 1; 1442 temp_ddc = intel_sdvo->ddc_bus >> 1;
1526 /* 1443 /*
 1527 * Don't use 1 as the argument of the DDC bus switch to get 1444 * Don't use 1 as the argument of the DDC bus switch to get
 1528 * the EDID; that bus is reserved for the SDVO SPD ROM. 1445 * the EDID; that bus is reserved for the SDVO SPD ROM.
1529 */ 1446 */
1530 while(temp_ddc > 1) { 1447 while(temp_ddc > 1) {
1531 sdvo_priv->ddc_bus = temp_ddc; 1448 intel_sdvo->ddc_bus = temp_ddc;
1532 edid = drm_get_edid(connector, intel_encoder->ddc_bus); 1449 edid = drm_get_edid(connector, intel_sdvo->base.ddc_bus);
1533 if (edid) { 1450 if (edid) {
1534 /* 1451 /*
1535 * When we can get the EDID, maybe it is the 1452 * When we can get the EDID, maybe it is the
1536 * correct DDC bus. Update it. 1453 * correct DDC bus. Update it.
1537 */ 1454 */
1538 sdvo_priv->ddc_bus = temp_ddc; 1455 intel_sdvo->ddc_bus = temp_ddc;
1539 break; 1456 break;
1540 } 1457 }
1541 temp_ddc >>= 1; 1458 temp_ddc >>= 1;
1542 } 1459 }
1543 if (edid == NULL) 1460 if (edid == NULL)
1544 sdvo_priv->ddc_bus = saved_ddc; 1461 intel_sdvo->ddc_bus = saved_ddc;
1545 } 1462 }
1546 /* when there is no edid and no monitor is connected with VGA 1463 /* when there is no edid and no monitor is connected with VGA
1547 * port, try to use the CRT ddc to read the EDID for DVI-connector 1464 * port, try to use the CRT ddc to read the EDID for DVI-connector
1548 */ 1465 */
1549 if (edid == NULL && sdvo_priv->analog_ddc_bus && 1466 if (edid == NULL && intel_sdvo->analog_ddc_bus &&
1550 !intel_analog_is_connected(connector->dev)) 1467 !intel_analog_is_connected(connector->dev))
1551 edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus); 1468 edid = drm_get_edid(connector, intel_sdvo->analog_ddc_bus);
1552 1469
1553 if (edid != NULL) { 1470 if (edid != NULL) {
1554 bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL); 1471 bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1555 bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK); 1472 bool need_digital = !!(intel_sdvo_connector->output_flag & SDVO_TMDS_MASK);
1556 1473
1557 /* DDC bus is shared, match EDID to connector type */ 1474 /* DDC bus is shared, match EDID to connector type */
1558 if (is_digital && need_digital) 1475 if (is_digital && need_digital)
1559 sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); 1476 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
1560 else if (is_digital != need_digital) 1477 else if (is_digital != need_digital)
1561 status = connector_status_disconnected; 1478 status = connector_status_disconnected;
1562 1479
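A sketch of the bus-walking loop above: ddc_bus selects one DDC bus out of several, the loop shifts the selector down to try each lower bus in turn, and it stops before reaching 1, which is reserved for the SDVO SPD ROM (probe() stands in for the drm_get_edid() call):

#include <stdint.h>
#include <stdbool.h>

static uint8_t probe_ddc_buses(uint8_t current_bus, bool (*probe)(uint8_t))
{
	uint8_t bus;

	for (bus = current_bus >> 1; bus > 1; bus >>= 1)
		if (probe(bus))
			return bus;	/* this bus produced an EDID */

	return current_bus;		/* nothing found; keep the old bus */
}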
@@ -1572,33 +1489,29 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1572static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) 1489static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
1573{ 1490{
1574 uint16_t response; 1491 uint16_t response;
1575 u8 status;
1576 struct drm_encoder *encoder = intel_attached_encoder(connector); 1492 struct drm_encoder *encoder = intel_attached_encoder(connector);
1577 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1493 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1578 struct intel_connector *intel_connector = to_intel_connector(connector); 1494 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1579 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1580 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
1581 enum drm_connector_status ret; 1495 enum drm_connector_status ret;
1582 1496
1583 intel_sdvo_write_cmd(intel_encoder, 1497 if (!intel_sdvo_write_cmd(intel_sdvo,
1584 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1498 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1585 if (sdvo_priv->is_tv) { 1499 return connector_status_unknown;
1500 if (intel_sdvo->is_tv) {
1586 /* add 30ms delay when the output type is SDVO-TV */ 1501 /* add 30ms delay when the output type is SDVO-TV */
1587 mdelay(30); 1502 mdelay(30);
1588 } 1503 }
1589 status = intel_sdvo_read_response(intel_encoder, &response, 2); 1504 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1505 return connector_status_unknown;
1590 1506
1591 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1507 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
1592 1508
1593 if (status != SDVO_CMD_STATUS_SUCCESS)
1594 return connector_status_unknown;
1595
1596 if (response == 0) 1509 if (response == 0)
1597 return connector_status_disconnected; 1510 return connector_status_disconnected;
1598 1511
1599 sdvo_priv->attached_output = response; 1512 intel_sdvo->attached_output = response;
1600 1513
1601 if ((sdvo_connector->output_flag & response) == 0) 1514 if ((intel_sdvo_connector->output_flag & response) == 0)
1602 ret = connector_status_disconnected; 1515 ret = connector_status_disconnected;
1603 else if (response & SDVO_TMDS_MASK) 1516 else if (response & SDVO_TMDS_MASK)
1604 ret = intel_sdvo_hdmi_sink_detect(connector); 1517 ret = intel_sdvo_hdmi_sink_detect(connector);
@@ -1607,16 +1520,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1607 1520
 1608 /* May update encoder flags, e.g. clock for SDVO TV, etc. */ 1521
1609 if (ret == connector_status_connected) { 1522 if (ret == connector_status_connected) {
1610 sdvo_priv->is_tv = false; 1523 intel_sdvo->is_tv = false;
1611 sdvo_priv->is_lvds = false; 1524 intel_sdvo->is_lvds = false;
1612 intel_encoder->needs_tv_clock = false; 1525 intel_sdvo->base.needs_tv_clock = false;
1613 1526
1614 if (response & SDVO_TV_MASK) { 1527 if (response & SDVO_TV_MASK) {
1615 sdvo_priv->is_tv = true; 1528 intel_sdvo->is_tv = true;
1616 intel_encoder->needs_tv_clock = true; 1529 intel_sdvo->base.needs_tv_clock = true;
1617 } 1530 }
1618 if (response & SDVO_LVDS_MASK) 1531 if (response & SDVO_LVDS_MASK)
1619 sdvo_priv->is_lvds = true; 1532 intel_sdvo->is_lvds = intel_sdvo->sdvo_lvds_fixed_mode != NULL;
1620 } 1533 }
1621 1534
1622 return ret; 1535 return ret;
@@ -1625,12 +1538,11 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1625static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1538static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1626{ 1539{
1627 struct drm_encoder *encoder = intel_attached_encoder(connector); 1540 struct drm_encoder *encoder = intel_attached_encoder(connector);
1628 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1541 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1629 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1630 int num_modes; 1542 int num_modes;
1631 1543
1632 /* set the bus switch and get the modes */ 1544 /* set the bus switch and get the modes */
1633 num_modes = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 1545 num_modes = intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
1634 1546
1635 /* 1547 /*
1636 * Mac mini hack. On this device, the DVI-I connector shares one DDC 1548 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1639,11 +1551,11 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1639 * which case we'll look there for the digital DDC data. 1551 * which case we'll look there for the digital DDC data.
1640 */ 1552 */
1641 if (num_modes == 0 && 1553 if (num_modes == 0 &&
1642 sdvo_priv->analog_ddc_bus && 1554 intel_sdvo->analog_ddc_bus &&
1643 !intel_analog_is_connected(connector->dev)) { 1555 !intel_analog_is_connected(connector->dev)) {
1644 /* Switch to the analog ddc bus and try that 1556 /* Switch to the analog ddc bus and try that
1645 */ 1557 */
1646 (void) intel_ddc_get_modes(connector, sdvo_priv->analog_ddc_bus); 1558 (void) intel_ddc_get_modes(connector, intel_sdvo->analog_ddc_bus);
1647 } 1559 }
1648} 1560}
1649 1561
@@ -1715,52 +1627,43 @@ struct drm_display_mode sdvo_tv_modes[] = {
1715static void intel_sdvo_get_tv_modes(struct drm_connector *connector) 1627static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1716{ 1628{
1717 struct drm_encoder *encoder = intel_attached_encoder(connector); 1629 struct drm_encoder *encoder = intel_attached_encoder(connector);
1718 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1630 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1719 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1720 struct intel_sdvo_sdtv_resolution_request tv_res; 1631 struct intel_sdvo_sdtv_resolution_request tv_res;
1721 uint32_t reply = 0, format_map = 0; 1632 uint32_t reply = 0, format_map = 0;
1722 int i; 1633 int i;
1723 uint8_t status;
1724
1725 1634
1726 /* Read the list of supported input resolutions for the selected TV 1635 /* Read the list of supported input resolutions for the selected TV
1727 * format. 1636 * format.
1728 */ 1637 */
1729 for (i = 0; i < TV_FORMAT_NUM; i++) 1638 format_map = 1 << intel_sdvo->tv_format_index;
1730 if (tv_format_names[i] == sdvo_priv->tv_format_name)
1731 break;
1732
1733 format_map = (1 << i);
1734 memcpy(&tv_res, &format_map, 1639 memcpy(&tv_res, &format_map,
1735 sizeof(struct intel_sdvo_sdtv_resolution_request) > 1640 min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request)));
1736 sizeof(format_map) ? sizeof(format_map) :
1737 sizeof(struct intel_sdvo_sdtv_resolution_request));
1738 1641
1739 intel_sdvo_set_target_output(intel_encoder, sdvo_priv->attached_output); 1642 if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output))
1643 return;
1740 1644
1741 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, 1645 BUILD_BUG_ON(sizeof(tv_res) != 3);
1742 &tv_res, sizeof(tv_res)); 1646 if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
1743 status = intel_sdvo_read_response(intel_encoder, &reply, 3); 1647 &tv_res, sizeof(tv_res)))
1744 if (status != SDVO_CMD_STATUS_SUCCESS) 1648 return;
1649 if (!intel_sdvo_read_response(intel_sdvo, &reply, 3))
1745 return; 1650 return;
1746 1651
1747 for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) 1652 for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
1748 if (reply & (1 << i)) { 1653 if (reply & (1 << i)) {
1749 struct drm_display_mode *nmode; 1654 struct drm_display_mode *nmode;
1750 nmode = drm_mode_duplicate(connector->dev, 1655 nmode = drm_mode_duplicate(connector->dev,
1751 &sdvo_tv_modes[i]); 1656 &sdvo_tv_modes[i]);
1752 if (nmode) 1657 if (nmode)
1753 drm_mode_probed_add(connector, nmode); 1658 drm_mode_probed_add(connector, nmode);
1754 } 1659 }
1755
1756} 1660}
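The resolution reply above is read as three bytes into a 32-bit mask; a sketch of the decoding, assuming the low byte carries bit 0 as in the little-endian read the driver relies on:

#include <stdint.h>

static uint32_t sdtv_reply_mask(const uint8_t resp[3])
{
	/* Only three bytes come back, so the top byte stays zero;
	 * bit i set means sdvo_tv_modes[i] is supported. */
	return resp[0] | (resp[1] << 8) | ((uint32_t)resp[2] << 16);
}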
1757 1661
1758static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1662static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1759{ 1663{
1760 struct drm_encoder *encoder = intel_attached_encoder(connector); 1664 struct drm_encoder *encoder = intel_attached_encoder(connector);
1761 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1665 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1762 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1666 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1763 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
1764 struct drm_display_mode *newmode; 1667 struct drm_display_mode *newmode;
1765 1668
1766 /* 1669 /*
@@ -1768,7 +1671,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1768 * Assume that the preferred modes are 1671 * Assume that the preferred modes are
1769 * arranged in priority order. 1672 * arranged in priority order.
1770 */ 1673 */
1771 intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 1674 intel_ddc_get_modes(connector, intel_sdvo->base.ddc_bus);
1772 if (list_empty(&connector->probed_modes) == false) 1675 if (list_empty(&connector->probed_modes) == false)
1773 goto end; 1676 goto end;
1774 1677
@@ -1787,8 +1690,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1787end: 1690end:
1788 list_for_each_entry(newmode, &connector->probed_modes, head) { 1691 list_for_each_entry(newmode, &connector->probed_modes, head) {
1789 if (newmode->type & DRM_MODE_TYPE_PREFERRED) { 1692 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1790 sdvo_priv->sdvo_lvds_fixed_mode = 1693 intel_sdvo->sdvo_lvds_fixed_mode =
1791 drm_mode_duplicate(connector->dev, newmode); 1694 drm_mode_duplicate(connector->dev, newmode);
1695 intel_sdvo->is_lvds = true;
1792 break; 1696 break;
1793 } 1697 }
1794 } 1698 }
@@ -1797,66 +1701,67 @@ end:
1797 1701
1798static int intel_sdvo_get_modes(struct drm_connector *connector) 1702static int intel_sdvo_get_modes(struct drm_connector *connector)
1799{ 1703{
1800 struct intel_connector *intel_connector = to_intel_connector(connector); 1704 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1801 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
1802 1705
1803 if (IS_TV(sdvo_connector)) 1706 if (IS_TV(intel_sdvo_connector))
1804 intel_sdvo_get_tv_modes(connector); 1707 intel_sdvo_get_tv_modes(connector);
1805 else if (IS_LVDS(sdvo_connector)) 1708 else if (IS_LVDS(intel_sdvo_connector))
1806 intel_sdvo_get_lvds_modes(connector); 1709 intel_sdvo_get_lvds_modes(connector);
1807 else 1710 else
1808 intel_sdvo_get_ddc_modes(connector); 1711 intel_sdvo_get_ddc_modes(connector);
1809 1712
1810 if (list_empty(&connector->probed_modes)) 1713 return !list_empty(&connector->probed_modes);
1811 return 0;
1812 return 1;
1813} 1714}
1814 1715
1815static 1716static void
1816void intel_sdvo_destroy_enhance_property(struct drm_connector *connector) 1717intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1817{ 1718{
1818 struct intel_connector *intel_connector = to_intel_connector(connector); 1719 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1819 struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
1820 struct drm_device *dev = connector->dev; 1720 struct drm_device *dev = connector->dev;
1821 1721
1822 if (IS_TV(sdvo_priv)) { 1722 if (intel_sdvo_connector->left)
1823 if (sdvo_priv->left_property) 1723 drm_property_destroy(dev, intel_sdvo_connector->left);
1824 drm_property_destroy(dev, sdvo_priv->left_property); 1724 if (intel_sdvo_connector->right)
1825 if (sdvo_priv->right_property) 1725 drm_property_destroy(dev, intel_sdvo_connector->right);
1826 drm_property_destroy(dev, sdvo_priv->right_property); 1726 if (intel_sdvo_connector->top)
1827 if (sdvo_priv->top_property) 1727 drm_property_destroy(dev, intel_sdvo_connector->top);
1828 drm_property_destroy(dev, sdvo_priv->top_property); 1728 if (intel_sdvo_connector->bottom)
1829 if (sdvo_priv->bottom_property) 1729 drm_property_destroy(dev, intel_sdvo_connector->bottom);
1830 drm_property_destroy(dev, sdvo_priv->bottom_property); 1730 if (intel_sdvo_connector->hpos)
1831 if (sdvo_priv->hpos_property) 1731 drm_property_destroy(dev, intel_sdvo_connector->hpos);
1832 drm_property_destroy(dev, sdvo_priv->hpos_property); 1732 if (intel_sdvo_connector->vpos)
1833 if (sdvo_priv->vpos_property) 1733 drm_property_destroy(dev, intel_sdvo_connector->vpos);
1834 drm_property_destroy(dev, sdvo_priv->vpos_property); 1734 if (intel_sdvo_connector->saturation)
1835 if (sdvo_priv->saturation_property) 1735 drm_property_destroy(dev, intel_sdvo_connector->saturation);
1836 drm_property_destroy(dev, 1736 if (intel_sdvo_connector->contrast)
1837 sdvo_priv->saturation_property); 1737 drm_property_destroy(dev, intel_sdvo_connector->contrast);
1838 if (sdvo_priv->contrast_property) 1738 if (intel_sdvo_connector->hue)
1839 drm_property_destroy(dev, 1739 drm_property_destroy(dev, intel_sdvo_connector->hue);
1840 sdvo_priv->contrast_property); 1740 if (intel_sdvo_connector->sharpness)
1841 if (sdvo_priv->hue_property) 1741 drm_property_destroy(dev, intel_sdvo_connector->sharpness);
1842 drm_property_destroy(dev, sdvo_priv->hue_property); 1742 if (intel_sdvo_connector->flicker_filter)
1843 } 1743 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
1844 if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { 1744 if (intel_sdvo_connector->flicker_filter_2d)
1845 if (sdvo_priv->brightness_property) 1745 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
1846 drm_property_destroy(dev, 1746 if (intel_sdvo_connector->flicker_filter_adaptive)
1847 sdvo_priv->brightness_property); 1747 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
1848 } 1748 if (intel_sdvo_connector->tv_luma_filter)
1849 return; 1749 drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
1750 if (intel_sdvo_connector->tv_chroma_filter)
1751 drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
1752 if (intel_sdvo_connector->dot_crawl)
1753 drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
1754 if (intel_sdvo_connector->brightness)
1755 drm_property_destroy(dev, intel_sdvo_connector->brightness);
1850} 1756}
1851 1757
1852static void intel_sdvo_destroy(struct drm_connector *connector) 1758static void intel_sdvo_destroy(struct drm_connector *connector)
1853{ 1759{
1854 struct intel_connector *intel_connector = to_intel_connector(connector); 1760 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1855 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
1856 1761
1857 if (sdvo_connector->tv_format_property) 1762 if (intel_sdvo_connector->tv_format)
1858 drm_property_destroy(connector->dev, 1763 drm_property_destroy(connector->dev,
1859 sdvo_connector->tv_format_property); 1764 intel_sdvo_connector->tv_format);
1860 1765
1861 intel_sdvo_destroy_enhance_property(connector); 1766 intel_sdvo_destroy_enhance_property(connector);
1862 drm_sysfs_connector_remove(connector); 1767 drm_sysfs_connector_remove(connector);
@@ -1870,132 +1775,118 @@ intel_sdvo_set_property(struct drm_connector *connector,
1870 uint64_t val) 1775 uint64_t val)
1871{ 1776{
1872 struct drm_encoder *encoder = intel_attached_encoder(connector); 1777 struct drm_encoder *encoder = intel_attached_encoder(connector);
1873 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1778 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
1874 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 1779 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1875 struct intel_connector *intel_connector = to_intel_connector(connector);
1876 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
1877 struct drm_crtc *crtc = encoder->crtc;
1878 int ret = 0;
1879 bool changed = false;
1880 uint8_t cmd, status;
1881 uint16_t temp_value; 1780 uint16_t temp_value;
1781 uint8_t cmd;
1782 int ret;
1882 1783
1883 ret = drm_connector_property_set_value(connector, property, val); 1784 ret = drm_connector_property_set_value(connector, property, val);
1884 if (ret < 0) 1785 if (ret)
1885 goto out; 1786 return ret;
1787
1788#define CHECK_PROPERTY(name, NAME) \
1789 if (intel_sdvo_connector->name == property) { \
1790 if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
1791 if (intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
1792 cmd = SDVO_CMD_SET_##NAME; \
1793 intel_sdvo_connector->cur_##name = temp_value; \
1794 goto set_value; \
1795 }
1886 1796
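For reference, what a single expansion of the macro above looks like after the preprocessor, using the hue property as the example:

	if (intel_sdvo_connector->hue == property) {
		if (intel_sdvo_connector->cur_hue == temp_value)
			return 0;
		if (intel_sdvo_connector->max_hue < temp_value)
			return -EINVAL;
		cmd = SDVO_CMD_SET_HUE;
		intel_sdvo_connector->cur_hue = temp_value;
		goto set_value;
	}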
1887 if (property == sdvo_connector->tv_format_property) { 1797 if (property == intel_sdvo_connector->tv_format) {
1888 if (val >= TV_FORMAT_NUM) { 1798 if (val >= TV_FORMAT_NUM)
1889 ret = -EINVAL; 1799 return -EINVAL;
1890 goto out;
1891 }
1892 if (sdvo_priv->tv_format_name ==
1893 sdvo_connector->tv_format_supported[val])
1894 goto out;
1895 1800
1896 sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[val]; 1801 if (intel_sdvo->tv_format_index ==
1897 changed = true; 1802 intel_sdvo_connector->tv_format_supported[val])
1898 } 1803 return 0;
1899 1804
1900 if (IS_TV(sdvo_connector) || IS_LVDS(sdvo_connector)) { 1805 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[val];
1901 cmd = 0; 1806 goto done;
1807 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
1902 temp_value = val; 1808 temp_value = val;
1903 if (sdvo_connector->left_property == property) { 1809 if (intel_sdvo_connector->left == property) {
1904 drm_connector_property_set_value(connector, 1810 drm_connector_property_set_value(connector,
1905 sdvo_connector->right_property, val); 1811 intel_sdvo_connector->right, val);
1906 if (sdvo_connector->left_margin == temp_value) 1812 if (intel_sdvo_connector->left_margin == temp_value)
1907 goto out; 1813 return 0;
1908 1814
1909 sdvo_connector->left_margin = temp_value; 1815 intel_sdvo_connector->left_margin = temp_value;
1910 sdvo_connector->right_margin = temp_value; 1816 intel_sdvo_connector->right_margin = temp_value;
1911 temp_value = sdvo_connector->max_hscan - 1817 temp_value = intel_sdvo_connector->max_hscan -
1912 sdvo_connector->left_margin; 1818 intel_sdvo_connector->left_margin;
1913 cmd = SDVO_CMD_SET_OVERSCAN_H; 1819 cmd = SDVO_CMD_SET_OVERSCAN_H;
1914 } else if (sdvo_connector->right_property == property) { 1820 goto set_value;
1821 } else if (intel_sdvo_connector->right == property) {
1915 drm_connector_property_set_value(connector, 1822 drm_connector_property_set_value(connector,
1916 sdvo_connector->left_property, val); 1823 intel_sdvo_connector->left, val);
1917 if (sdvo_connector->right_margin == temp_value) 1824 if (intel_sdvo_connector->right_margin == temp_value)
1918 goto out; 1825 return 0;
1919 1826
1920 sdvo_connector->left_margin = temp_value; 1827 intel_sdvo_connector->left_margin = temp_value;
1921 sdvo_connector->right_margin = temp_value; 1828 intel_sdvo_connector->right_margin = temp_value;
1922 temp_value = sdvo_connector->max_hscan - 1829 temp_value = intel_sdvo_connector->max_hscan -
1923 sdvo_connector->left_margin; 1830 intel_sdvo_connector->left_margin;
1924 cmd = SDVO_CMD_SET_OVERSCAN_H; 1831 cmd = SDVO_CMD_SET_OVERSCAN_H;
1925 } else if (sdvo_connector->top_property == property) { 1832 goto set_value;
1833 } else if (intel_sdvo_connector->top == property) {
1926 drm_connector_property_set_value(connector, 1834 drm_connector_property_set_value(connector,
1927 sdvo_connector->bottom_property, val); 1835 intel_sdvo_connector->bottom, val);
1928 if (sdvo_connector->top_margin == temp_value) 1836 if (intel_sdvo_connector->top_margin == temp_value)
1929 goto out; 1837 return 0;
1930 1838
1931 sdvo_connector->top_margin = temp_value; 1839 intel_sdvo_connector->top_margin = temp_value;
1932 sdvo_connector->bottom_margin = temp_value; 1840 intel_sdvo_connector->bottom_margin = temp_value;
1933 temp_value = sdvo_connector->max_vscan - 1841 temp_value = intel_sdvo_connector->max_vscan -
1934 sdvo_connector->top_margin; 1842 intel_sdvo_connector->top_margin;
1935 cmd = SDVO_CMD_SET_OVERSCAN_V; 1843 cmd = SDVO_CMD_SET_OVERSCAN_V;
1936 } else if (sdvo_connector->bottom_property == property) { 1844 goto set_value;
1845 } else if (intel_sdvo_connector->bottom == property) {
1937 drm_connector_property_set_value(connector, 1846 drm_connector_property_set_value(connector,
1938 sdvo_connector->top_property, val); 1847 intel_sdvo_connector->top, val);
1939 if (sdvo_connector->bottom_margin == temp_value) 1848 if (intel_sdvo_connector->bottom_margin == temp_value)
1940 goto out; 1849 return 0;
1941 sdvo_connector->top_margin = temp_value; 1850
1942 sdvo_connector->bottom_margin = temp_value; 1851 intel_sdvo_connector->top_margin = temp_value;
1943 temp_value = sdvo_connector->max_vscan - 1852 intel_sdvo_connector->bottom_margin = temp_value;
1944 sdvo_connector->top_margin; 1853 temp_value = intel_sdvo_connector->max_vscan -
1854 intel_sdvo_connector->top_margin;
1945 cmd = SDVO_CMD_SET_OVERSCAN_V; 1855 cmd = SDVO_CMD_SET_OVERSCAN_V;
1946 } else if (sdvo_connector->hpos_property == property) { 1856 goto set_value;
1947 if (sdvo_connector->cur_hpos == temp_value)
1948 goto out;
1949
1950 cmd = SDVO_CMD_SET_POSITION_H;
1951 sdvo_connector->cur_hpos = temp_value;
1952 } else if (sdvo_connector->vpos_property == property) {
1953 if (sdvo_connector->cur_vpos == temp_value)
1954 goto out;
1955
1956 cmd = SDVO_CMD_SET_POSITION_V;
1957 sdvo_connector->cur_vpos = temp_value;
1958 } else if (sdvo_connector->saturation_property == property) {
1959 if (sdvo_connector->cur_saturation == temp_value)
1960 goto out;
1961
1962 cmd = SDVO_CMD_SET_SATURATION;
1963 sdvo_connector->cur_saturation = temp_value;
1964 } else if (sdvo_connector->contrast_property == property) {
1965 if (sdvo_connector->cur_contrast == temp_value)
1966 goto out;
1967
1968 cmd = SDVO_CMD_SET_CONTRAST;
1969 sdvo_connector->cur_contrast = temp_value;
1970 } else if (sdvo_connector->hue_property == property) {
1971 if (sdvo_connector->cur_hue == temp_value)
1972 goto out;
1973
1974 cmd = SDVO_CMD_SET_HUE;
1975 sdvo_connector->cur_hue = temp_value;
1976 } else if (sdvo_connector->brightness_property == property) {
1977 if (sdvo_connector->cur_brightness == temp_value)
1978 goto out;
1979
1980 cmd = SDVO_CMD_SET_BRIGHTNESS;
1981 sdvo_connector->cur_brightness = temp_value;
1982 }
1983 if (cmd) {
1984 intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
1985 status = intel_sdvo_read_response(intel_encoder,
1986 NULL, 0);
1987 if (status != SDVO_CMD_STATUS_SUCCESS) {
1988 DRM_DEBUG_KMS("Incorrect SDVO command \n");
1989 return -EINVAL;
1990 }
1991 changed = true;
1992 } 1857 }
1858 CHECK_PROPERTY(hpos, HPOS)
1859 CHECK_PROPERTY(vpos, VPOS)
1860 CHECK_PROPERTY(saturation, SATURATION)
1861 CHECK_PROPERTY(contrast, CONTRAST)
1862 CHECK_PROPERTY(hue, HUE)
1863 CHECK_PROPERTY(brightness, BRIGHTNESS)
1864 CHECK_PROPERTY(sharpness, SHARPNESS)
1865 CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
1866 CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
1867 CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
1868 CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
1869 CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
1870 CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
1993 } 1871 }
1994 if (changed && crtc) 1872
1873 return -EINVAL; /* unknown property */
1874
1875set_value:
1876 if (!intel_sdvo_set_value(intel_sdvo, cmd, &temp_value, 2))
1877 return -EIO;
1878
1879
1880done:
1881 if (encoder->crtc) {
1882 struct drm_crtc *crtc = encoder->crtc;
1883
1995 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1884 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
1996 crtc->y, crtc->fb); 1885 crtc->y, crtc->fb);
1997out: 1886 }
1998 return ret; 1887
1888 return 0;
1889#undef CHECK_PROPERTY
1999} 1890}
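
The CHECK_PROPERTY macro introduced above collapses what used to be a dozen near-identical if/else branches: token pasting (##) derives the cached-value field (cur_##name), the range limit (max_##name) and the SDVO command code (SDVO_CMD_SET_##NAME) from a single property name. A minimal standalone sketch of the same pattern, with illustrative names rather than the driver's:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the driver's command codes. */
    #define CMD_SET_HUE      0x5a
    #define CMD_SET_CONTRAST 0x5f

    struct props {
        int hue, contrast;                    /* property handles      */
        uint16_t cur_hue, max_hue;            /* cached value + limit  */
        uint16_t cur_contrast, max_contrast;
    };

    /* One name expands into a handle check, a range check, a cache
     * update and the matching SET command code. */
    #define CHECK_PROPERTY(p, name, NAME, prop, val, cmd_out)   \
        if ((p)->name == (prop)) {                              \
            if ((p)->cur_##name == (val)) return 0;             \
            if ((p)->max_##name < (val)) return -1;             \
            (p)->cur_##name = (val);                            \
            *(cmd_out) = CMD_SET_##NAME;                        \
            return 1;                                           \
        }

    static int set_prop(struct props *p, int prop, uint16_t val, int *cmd)
    {
        CHECK_PROPERTY(p, hue, HUE, prop, val, cmd)
        CHECK_PROPERTY(p, contrast, CONTRAST, prop, val, cmd)
        return -1; /* unknown property */
    }

    int main(void)
    {
        struct props p = { .hue = 1, .contrast = 2,
                           .max_hue = 63, .max_contrast = 63 };
        int cmd = 0;
        if (set_prop(&p, 1, 40, &cmd) == 1)
            printf("send cmd 0x%02x, value %u\n", cmd, (unsigned)p.cur_hue);
        return 0;
    }
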
2000 1891
2001static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { 1892static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
@@ -2022,28 +1913,57 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
2022 1913
2023static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 1914static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
2024{ 1915{
2025 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1916 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
2026 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2027 1917
2028 if (intel_encoder->i2c_bus) 1918 if (intel_sdvo->analog_ddc_bus)
2029 intel_i2c_destroy(intel_encoder->i2c_bus); 1919 intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
2030 if (intel_encoder->ddc_bus)
2031 intel_i2c_destroy(intel_encoder->ddc_bus);
2032 if (sdvo_priv->analog_ddc_bus)
2033 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
2034 1920
2035 if (sdvo_priv->sdvo_lvds_fixed_mode != NULL) 1921 if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
2036 drm_mode_destroy(encoder->dev, 1922 drm_mode_destroy(encoder->dev,
2037 sdvo_priv->sdvo_lvds_fixed_mode); 1923 intel_sdvo->sdvo_lvds_fixed_mode);
2038 1924
2039 drm_encoder_cleanup(encoder); 1925 intel_encoder_destroy(encoder);
2040 kfree(intel_encoder);
2041} 1926}
2042 1927
2043static const struct drm_encoder_funcs intel_sdvo_enc_funcs = { 1928static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
2044 .destroy = intel_sdvo_enc_destroy, 1929 .destroy = intel_sdvo_enc_destroy,
2045}; 1930};
2046 1931
1932static void
1933intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
1934{
1935 uint16_t mask = 0;
1936 unsigned int num_bits;
1937
1938 /* Make a mask of outputs less than or equal to our own priority in the
1939 * list.
1940 */
1941 switch (sdvo->controlled_output) {
1942 case SDVO_OUTPUT_LVDS1:
1943 mask |= SDVO_OUTPUT_LVDS1;
1944 case SDVO_OUTPUT_LVDS0:
1945 mask |= SDVO_OUTPUT_LVDS0;
1946 case SDVO_OUTPUT_TMDS1:
1947 mask |= SDVO_OUTPUT_TMDS1;
1948 case SDVO_OUTPUT_TMDS0:
1949 mask |= SDVO_OUTPUT_TMDS0;
1950 case SDVO_OUTPUT_RGB1:
1951 mask |= SDVO_OUTPUT_RGB1;
1952 case SDVO_OUTPUT_RGB0:
1953 mask |= SDVO_OUTPUT_RGB0;
1954 break;
1955 }
1956
1957 /* Count bits to find what number we are in the priority list. */
1958 mask &= sdvo->caps.output_flags;
1959 num_bits = hweight16(mask);
1960 /* If more than 3 outputs, default to DDC bus 3 for now. */
1961 if (num_bits > 3)
1962 num_bits = 3;
1963
1964 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1965 sdvo->ddc_bus = 1 << num_bits;
1966}
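
intel_sdvo_guess_ddc_bus leans on two idioms: deliberate switch fall-through, so that starting from the controlled output every lower-priority case also runs and the mask accumulates, and hweight16(), the kernel's 16-bit population count, which turns that mask into the output's position in the priority list. A hedged standalone sketch, with a plain popcount standing in for hweight16():

    #include <stdio.h>
    #include <stdint.h>

    enum { OUT_RGB0 = 1 << 0, OUT_RGB1 = 1 << 1,
           OUT_TMDS0 = 1 << 2, OUT_TMDS1 = 1 << 3,
           OUT_LVDS0 = 1 << 4, OUT_LVDS1 = 1 << 5 };

    /* Stand-in for the kernel's hweight16(): count set bits. */
    static unsigned int popcount16(uint16_t v)
    {
        unsigned int n = 0;
        while (v) { n += v & 1; v >>= 1; }
        return n;
    }

    static unsigned int guess_ddc_bus(uint16_t controlled, uint16_t supported)
    {
        uint16_t mask = 0;
        unsigned int num_bits;

        switch (controlled) {       /* intentional fall-through: each case */
        case OUT_LVDS1: mask |= OUT_LVDS1; /* adds itself, then drops into */
        case OUT_LVDS0: mask |= OUT_LVDS0; /* every lower-priority output  */
        case OUT_TMDS1: mask |= OUT_TMDS1;
        case OUT_TMDS0: mask |= OUT_TMDS0;
        case OUT_RGB1:  mask |= OUT_RGB1;
        case OUT_RGB0:  mask |= OUT_RGB0;
            break;
        }

        mask &= supported;           /* keep only outputs this device has */
        num_bits = popcount16(mask); /* our position in the priority list */
        if (num_bits > 3)            /* more than 3 outputs: cap at bus 3 */
            num_bits = 3;
        return 1u << num_bits;       /* one-hot, SDVO_CONTROL_BUS_DDCx-like */
    }

    int main(void)
    {
        /* TMDS0 on a device that also has RGB0: 2 bits set -> bus bit 4. */
        printf("ddc bus bit: %u\n",
               guess_ddc_bus(OUT_TMDS0, OUT_TMDS0 | OUT_RGB0));
        return 0;
    }
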
2047 1967
2048/** 1968/**
2049 * Choose the appropriate DDC bus for control bus switch command for this 1969 * Choose the appropriate DDC bus for control bus switch command for this
@@ -2054,7 +1974,7 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
2054 */ 1974 */
2055static void 1975static void
2056intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, 1976intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2057 struct intel_sdvo_priv *sdvo, u32 reg) 1977 struct intel_sdvo *sdvo, u32 reg)
2058{ 1978{
2059 struct sdvo_device_mapping *mapping; 1979 struct sdvo_device_mapping *mapping;
2060 1980
@@ -2063,61 +1983,53 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
2063 else 1983 else
2064 mapping = &(dev_priv->sdvo_mappings[1]); 1984 mapping = &(dev_priv->sdvo_mappings[1]);
2065 1985
2066 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); 1986 if (mapping->initialized)
1987 sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
1988 else
1989 intel_sdvo_guess_ddc_bus(sdvo);
2067} 1990}
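
When the VBT mapping is initialized, the bus comes straight from the high nibble of ddc_pin, and the guess above is only the fallback. The decode itself is a shift-and-mask; a tiny sketch (the one-hot result mirrors the SDVO_CONTROL_BUS_DDCx encoding, simplified here):

    #include <stdio.h>
    #include <stdint.h>

    /* High nibble of ddc_pin selects the bus; the result is used as a
     * one-hot control-bus bit (illustrative sketch, not the full driver). */
    static uint16_t ddc_bus_from_pin(uint8_t ddc_pin)
    {
        return (uint16_t)1 << ((ddc_pin & 0xf0) >> 4);
    }

    int main(void)
    {
        printf("pin 0x%02x -> bus bit 0x%x\n",
               0x20, (unsigned)ddc_bus_from_pin(0x20));
        return 0;
    }
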
2068 1991
2069static bool 1992static bool
2070intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output, int device) 1993intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
2071{ 1994{
2072 struct intel_sdvo_priv *sdvo_priv = output->dev_priv; 1995 return intel_sdvo_set_target_output(intel_sdvo,
2073 uint8_t status; 1996 device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
2074 1997 intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
2075 if (device == 0) 1998 &intel_sdvo->is_hdmi, 1);
2076 intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS0);
2077 else
2078 intel_sdvo_set_target_output(output, SDVO_OUTPUT_TMDS1);
2079
2080 intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
2081 status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
2082 if (status != SDVO_CMD_STATUS_SUCCESS)
2083 return false;
2084 return true;
2085} 1999}
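
The rewrite of intel_sdvo_get_digital_encoding_mode shows the refactoring pattern applied throughout this patch: a write_cmd call plus an explicit status check becomes a pair of bool-returning helpers chained with &&, so the body collapses to one short-circuiting return expression. A simplified sketch of the shape, with stand-in helpers rather than the driver's real ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for intel_sdvo_set_target_output() and
     * intel_sdvo_get_value(): each returns false on failure. */
    static bool set_target(int device)
    {
        return device == 0 || device == 1;
    }

    static bool get_value(int cmd, void *out, int len)
    {
        (void)cmd; (void)len;
        *(bool *)out = true;   /* pretend the device answered "HDMI" */
        return true;
    }

    /* Short-circuit chaining: the second call only runs if the first
     * succeeded, and the combined result is the function's result. */
    static bool get_digital_encoding_mode(int device, bool *is_hdmi)
    {
        return set_target(device) &&
               get_value(0x9d /* hypothetical GET_ENCODE code */, is_hdmi, 1);
    }

    int main(void)
    {
        bool hdmi = false;
        printf("ok=%d hdmi=%d\n",
               get_digital_encoding_mode(0, &hdmi), hdmi);
        return 0;
    }
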
2086 2000
2087static struct intel_encoder * 2001static struct intel_sdvo *
2088intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan) 2002intel_sdvo_chan_to_intel_sdvo(struct intel_i2c_chan *chan)
2089{ 2003{
2090 struct drm_device *dev = chan->drm_dev; 2004 struct drm_device *dev = chan->drm_dev;
2091 struct drm_encoder *encoder; 2005 struct drm_encoder *encoder;
2092 struct intel_encoder *intel_encoder = NULL;
2093 2006
2094 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2007 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2095 intel_encoder = enc_to_intel_encoder(encoder); 2008 struct intel_sdvo *intel_sdvo = enc_to_intel_sdvo(encoder);
2096 if (intel_encoder->ddc_bus == &chan->adapter) 2009 if (intel_sdvo->base.ddc_bus == &chan->adapter)
2097 break; 2010 return intel_sdvo;
2098 } 2011 }
2099 return intel_encoder; 2012
2013 return NULL;
2100} 2014}
2101 2015
2102static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap, 2016static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
2103 struct i2c_msg msgs[], int num) 2017 struct i2c_msg msgs[], int num)
2104{ 2018{
2105 struct intel_encoder *intel_encoder; 2019 struct intel_sdvo *intel_sdvo;
2106 struct intel_sdvo_priv *sdvo_priv;
2107 struct i2c_algo_bit_data *algo_data; 2020 struct i2c_algo_bit_data *algo_data;
2108 const struct i2c_algorithm *algo; 2021 const struct i2c_algorithm *algo;
2109 2022
2110 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data; 2023 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
2111 intel_encoder = 2024 intel_sdvo =
2112 intel_sdvo_chan_to_intel_encoder( 2025 intel_sdvo_chan_to_intel_sdvo((struct intel_i2c_chan *)
2113 (struct intel_i2c_chan *)(algo_data->data)); 2026 (algo_data->data));
2114 if (intel_encoder == NULL) 2027 if (intel_sdvo == NULL)
2115 return -EINVAL; 2028 return -EINVAL;
2116 2029
2117 sdvo_priv = intel_encoder->dev_priv; 2030 algo = intel_sdvo->base.i2c_bus->algo;
2118 algo = intel_encoder->i2c_bus->algo;
2119 2031
2120 intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus); 2032 intel_sdvo_set_control_bus_switch(intel_sdvo, intel_sdvo->ddc_bus);
2121 return algo->master_xfer(i2c_adap, msgs, num); 2033 return algo->master_xfer(i2c_adap, msgs, num);
2122} 2034}
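
intel_sdvo_master_xfer is a delegating i2c algorithm: it recovers the owning SDVO device from the adapter's algo_data, issues the control-bus-switch command, then hands the transfer to the saved bit-banging master_xfer. A userspace-flavoured sketch of the delegation (the types are simplified stand-ins, not the kernel's struct i2c_adapter/i2c_algorithm):

    #include <stdio.h>

    struct adapter;

    struct algorithm {
        int (*master_xfer)(struct adapter *adap, int nmsgs);
    };

    struct adapter {
        const struct algorithm *algo;
        void *algo_data;            /* points back at the owning device */
    };

    struct sdvo_dev {
        int ddc_bus;
        const struct algorithm *real_algo;  /* saved bit-banging algo */
    };

    /* The real transfer, normally the bit-banging implementation. */
    static int real_xfer(struct adapter *adap, int nmsgs)
    {
        (void)adap;
        printf("bit-banged %d messages\n", nmsgs);
        return nmsgs;
    }
    static const struct algorithm bit_algo = { real_xfer };

    /* Wrapper: switch the SDVO control bus first, then delegate. */
    static int sdvo_xfer(struct adapter *adap, int nmsgs)
    {
        struct sdvo_dev *sdvo = adap->algo_data;
        printf("switch control bus to DDC%d\n", sdvo->ddc_bus);
        return sdvo->real_algo->master_xfer(adap, nmsgs);
    }
    static const struct algorithm sdvo_algo = { sdvo_xfer };

    int main(void)
    {
        struct sdvo_dev dev = { 1, &bit_algo };
        struct adapter adap = { &sdvo_algo, &dev };
        return adap.algo->master_xfer(&adap, 2) == 2 ? 0 : 1;
    }
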
2123 2035
@@ -2162,27 +2074,9 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
2162 return 0x72; 2074 return 0x72;
2163} 2075}
2164 2076
2165static bool
2166intel_sdvo_connector_alloc (struct intel_connector **ret)
2167{
2168 struct intel_connector *intel_connector;
2169 struct intel_sdvo_connector *sdvo_connector;
2170
2171 *ret = kzalloc(sizeof(*intel_connector) +
2172 sizeof(*sdvo_connector), GFP_KERNEL);
2173 if (!*ret)
2174 return false;
2175
2176 intel_connector = *ret;
2177 sdvo_connector = (struct intel_sdvo_connector *)(intel_connector + 1);
2178 intel_connector->dev_priv = sdvo_connector;
2179
2180 return true;
2181}
2182
2183static void 2077static void
2184intel_sdvo_connector_create (struct drm_encoder *encoder, 2078intel_sdvo_connector_init(struct drm_encoder *encoder,
2185 struct drm_connector *connector) 2079 struct drm_connector *connector)
2186{ 2080{
2187 drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs, 2081 drm_connector_init(encoder->dev, connector, &intel_sdvo_connector_funcs,
2188 connector->connector_type); 2082 connector->connector_type);
@@ -2198,582 +2092,470 @@ intel_sdvo_connector_create (struct drm_encoder *encoder,
2198} 2092}
2199 2093
2200static bool 2094static bool
2201intel_sdvo_dvi_init(struct intel_encoder *intel_encoder, int device) 2095intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2202{ 2096{
2203 struct drm_encoder *encoder = &intel_encoder->enc; 2097 struct drm_encoder *encoder = &intel_sdvo->base.enc;
2204 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2205 struct drm_connector *connector; 2098 struct drm_connector *connector;
2206 struct intel_connector *intel_connector; 2099 struct intel_connector *intel_connector;
2207 struct intel_sdvo_connector *sdvo_connector; 2100 struct intel_sdvo_connector *intel_sdvo_connector;
2208 2101
2209 if (!intel_sdvo_connector_alloc(&intel_connector)) 2102 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
2103 if (!intel_sdvo_connector)
2210 return false; 2104 return false;
2211 2105
2212 sdvo_connector = intel_connector->dev_priv;
2213
2214 if (device == 0) { 2106 if (device == 0) {
2215 sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS0; 2107 intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
2216 sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; 2108 intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
2217 } else if (device == 1) { 2109 } else if (device == 1) {
2218 sdvo_priv->controlled_output |= SDVO_OUTPUT_TMDS1; 2110 intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
2219 sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; 2111 intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
2220 } 2112 }
2221 2113
2114 intel_connector = &intel_sdvo_connector->base;
2222 connector = &intel_connector->base; 2115 connector = &intel_connector->base;
2223 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2116 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2224 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2117 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2225 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2118 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2226 2119
2227 if (intel_sdvo_get_supp_encode(intel_encoder, &sdvo_priv->encode) 2120 if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
2228 && intel_sdvo_get_digital_encoding_mode(intel_encoder, device) 2121 && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
2229 && sdvo_priv->is_hdmi) { 2122 && intel_sdvo->is_hdmi) {
2230 /* enable hdmi encoding mode if supported */ 2123 /* enable hdmi encoding mode if supported */
2231 intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI); 2124 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
2232 intel_sdvo_set_colorimetry(intel_encoder, 2125 intel_sdvo_set_colorimetry(intel_sdvo,
2233 SDVO_COLORIMETRY_RGB256); 2126 SDVO_COLORIMETRY_RGB256);
2234 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2127 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2235 } 2128 }
2236 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2129 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2237 (1 << INTEL_ANALOG_CLONE_BIT); 2130 (1 << INTEL_ANALOG_CLONE_BIT));
2238 2131
2239 intel_sdvo_connector_create(encoder, connector); 2132 intel_sdvo_connector_init(encoder, connector);
2240 2133
2241 return true; 2134 return true;
2242} 2135}
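
The allocation change in intel_sdvo_dvi_init is the structural heart of this patch: intel_sdvo_connector now embeds struct intel_connector as its base member, so a single kzalloc covers both and to_intel_sdvo_connector() recovers the derived type, where the old code glued two structs together through a dev_priv pointer. A minimal sketch of the embed-and-downcast pattern (simplified types, not the driver's):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct connector { int type; };        /* the generic base type */

    struct sdvo_connector {
        struct connector base;             /* embedded, not pointed-to */
        int output_flag;
    };

    /* container_of in miniature: recover the derived struct. */
    #define to_sdvo_connector(c) \
        ((struct sdvo_connector *)((char *)(c) - \
                                   offsetof(struct sdvo_connector, base)))

    int main(void)
    {
        struct sdvo_connector *s = calloc(1, sizeof(*s)); /* one allocation */
        if (!s)
            return 1;
        s->output_flag = 0x4;

        struct connector *base = &s->base;  /* generic code sees the base */
        printf("flag: %d\n", to_sdvo_connector(base)->output_flag);

        free(s);
        return 0;
    }
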
2243 2136
2244static bool 2137static bool
2245intel_sdvo_tv_init(struct intel_encoder *intel_encoder, int type) 2138intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2246{ 2139{
2247 struct drm_encoder *encoder = &intel_encoder->enc; 2140 struct drm_encoder *encoder = &intel_sdvo->base.enc;
2248 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2249 struct drm_connector *connector; 2141 struct drm_connector *connector;
2250 struct intel_connector *intel_connector; 2142 struct intel_connector *intel_connector;
2251 struct intel_sdvo_connector *sdvo_connector; 2143 struct intel_sdvo_connector *intel_sdvo_connector;
2252 2144
2253 if (!intel_sdvo_connector_alloc(&intel_connector)) 2145 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
2254 return false; 2146 if (!intel_sdvo_connector)
2147 return false;
2255 2148
2149 intel_connector = &intel_sdvo_connector->base;
2256 connector = &intel_connector->base; 2150 connector = &intel_connector->base;
2257 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2151 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2258 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2152 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2259 sdvo_connector = intel_connector->dev_priv;
2260 2153
2261 sdvo_priv->controlled_output |= type; 2154 intel_sdvo->controlled_output |= type;
2262 sdvo_connector->output_flag = type; 2155 intel_sdvo_connector->output_flag = type;
2263 2156
2264 sdvo_priv->is_tv = true; 2157 intel_sdvo->is_tv = true;
2265 intel_encoder->needs_tv_clock = true; 2158 intel_sdvo->base.needs_tv_clock = true;
2266 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2159 intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2267 2160
2268 intel_sdvo_connector_create(encoder, connector); 2161 intel_sdvo_connector_init(encoder, connector);
2269 2162
2270 intel_sdvo_tv_create_property(connector, type); 2163 if (!intel_sdvo_tv_create_property(intel_sdvo, intel_sdvo_connector, type))
2164 goto err;
2271 2165
2272 intel_sdvo_create_enhance_property(connector); 2166 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2167 goto err;
2273 2168
2274 return true; 2169 return true;
2170
2171err:
2172 intel_sdvo_destroy_enhance_property(connector);
2173 kfree(intel_sdvo_connector);
2174 return false;
2275} 2175}
2276 2176
2277static bool 2177static bool
2278intel_sdvo_analog_init(struct intel_encoder *intel_encoder, int device) 2178intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2279{ 2179{
2280 struct drm_encoder *encoder = &intel_encoder->enc; 2180 struct drm_encoder *encoder = &intel_sdvo->base.enc;
2281 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2282 struct drm_connector *connector; 2181 struct drm_connector *connector;
2283 struct intel_connector *intel_connector; 2182 struct intel_connector *intel_connector;
2284 struct intel_sdvo_connector *sdvo_connector; 2183 struct intel_sdvo_connector *intel_sdvo_connector;
2285 2184
2286 if (!intel_sdvo_connector_alloc(&intel_connector)) 2185 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
2287 return false; 2186 if (!intel_sdvo_connector)
2187 return false;
2288 2188
2189 intel_connector = &intel_sdvo_connector->base;
2289 connector = &intel_connector->base; 2190 connector = &intel_connector->base;
2290 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 2191 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2291 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2192 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2292 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2193 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2293 sdvo_connector = intel_connector->dev_priv;
2294 2194
2295 if (device == 0) { 2195 if (device == 0) {
2296 sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB0; 2196 intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
2297 sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; 2197 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
2298 } else if (device == 1) { 2198 } else if (device == 1) {
2299 sdvo_priv->controlled_output |= SDVO_OUTPUT_RGB1; 2199 intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
2300 sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2200 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2301 } 2201 }
2302 2202
2303 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2203 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2304 (1 << INTEL_ANALOG_CLONE_BIT); 2204 (1 << INTEL_ANALOG_CLONE_BIT));
2305 2205
2306 intel_sdvo_connector_create(encoder, connector); 2206 intel_sdvo_connector_init(encoder, connector);
2307 return true; 2207 return true;
2308} 2208}
2309 2209
2310static bool 2210static bool
2311intel_sdvo_lvds_init(struct intel_encoder *intel_encoder, int device) 2211intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2312{ 2212{
2313 struct drm_encoder *encoder = &intel_encoder->enc; 2213 struct drm_encoder *encoder = &intel_sdvo->base.enc;
2314 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2315 struct drm_connector *connector; 2214 struct drm_connector *connector;
2316 struct intel_connector *intel_connector; 2215 struct intel_connector *intel_connector;
2317 struct intel_sdvo_connector *sdvo_connector; 2216 struct intel_sdvo_connector *intel_sdvo_connector;
2318 2217
2319 if (!intel_sdvo_connector_alloc(&intel_connector)) 2218 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
2320 return false; 2219 if (!intel_sdvo_connector)
2220 return false;
2321 2221
2322 connector = &intel_connector->base; 2222 intel_connector = &intel_sdvo_connector->base;
2223 connector = &intel_connector->base;
2323 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2224 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2324 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2225 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2325 sdvo_connector = intel_connector->dev_priv;
2326
2327 sdvo_priv->is_lvds = true;
2328 2226
2329 if (device == 0) { 2227 if (device == 0) {
2330 sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS0; 2228 intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
2331 sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; 2229 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
2332 } else if (device == 1) { 2230 } else if (device == 1) {
2333 sdvo_priv->controlled_output |= SDVO_OUTPUT_LVDS1; 2231 intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
2334 sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2232 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2335 } 2233 }
2336 2234
2337 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2235 intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
2338 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2236 (1 << INTEL_SDVO_LVDS_CLONE_BIT));
2339 2237
2340 intel_sdvo_connector_create(encoder, connector); 2238 intel_sdvo_connector_init(encoder, connector);
2341 intel_sdvo_create_enhance_property(connector); 2239 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2342 return true; 2240 goto err;
2241
2242 return true;
2243
2244err:
2245 intel_sdvo_destroy_enhance_property(connector);
2246 kfree(intel_sdvo_connector);
2247 return false;
2343} 2248}
2344 2249
2345static bool 2250static bool
2346intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags) 2251intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2347{ 2252{
2348 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv; 2253 intel_sdvo->is_tv = false;
2349 2254 intel_sdvo->base.needs_tv_clock = false;
2350 sdvo_priv->is_tv = false; 2255 intel_sdvo->is_lvds = false;
2351 intel_encoder->needs_tv_clock = false;
2352 sdvo_priv->is_lvds = false;
2353 2256
2354 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ 2257 /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
2355 2258
2356 if (flags & SDVO_OUTPUT_TMDS0) 2259 if (flags & SDVO_OUTPUT_TMDS0)
2357 if (!intel_sdvo_dvi_init(intel_encoder, 0)) 2260 if (!intel_sdvo_dvi_init(intel_sdvo, 0))
2358 return false; 2261 return false;
2359 2262
2360 if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) 2263 if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
2361 if (!intel_sdvo_dvi_init(intel_encoder, 1)) 2264 if (!intel_sdvo_dvi_init(intel_sdvo, 1))
2362 return false; 2265 return false;
2363 2266
2364 /* TV has no XXX1 function block */ 2267 /* TV has no XXX1 function block */
2365 if (flags & SDVO_OUTPUT_SVID0) 2268 if (flags & SDVO_OUTPUT_SVID0)
2366 if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_SVID0)) 2269 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_SVID0))
2367 return false; 2270 return false;
2368 2271
2369 if (flags & SDVO_OUTPUT_CVBS0) 2272 if (flags & SDVO_OUTPUT_CVBS0)
2370 if (!intel_sdvo_tv_init(intel_encoder, SDVO_OUTPUT_CVBS0)) 2273 if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
2371 return false; 2274 return false;
2372 2275
2373 if (flags & SDVO_OUTPUT_RGB0) 2276 if (flags & SDVO_OUTPUT_RGB0)
2374 if (!intel_sdvo_analog_init(intel_encoder, 0)) 2277 if (!intel_sdvo_analog_init(intel_sdvo, 0))
2375 return false; 2278 return false;
2376 2279
2377 if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) 2280 if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
2378 if (!intel_sdvo_analog_init(intel_encoder, 1)) 2281 if (!intel_sdvo_analog_init(intel_sdvo, 1))
2379 return false; 2282 return false;
2380 2283
2381 if (flags & SDVO_OUTPUT_LVDS0) 2284 if (flags & SDVO_OUTPUT_LVDS0)
2382 if (!intel_sdvo_lvds_init(intel_encoder, 0)) 2285 if (!intel_sdvo_lvds_init(intel_sdvo, 0))
2383 return false; 2286 return false;
2384 2287
2385 if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) 2288 if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
2386 if (!intel_sdvo_lvds_init(intel_encoder, 1)) 2289 if (!intel_sdvo_lvds_init(intel_sdvo, 1))
2387 return false; 2290 return false;
2388 2291
2389 if ((flags & SDVO_OUTPUT_MASK) == 0) { 2292 if ((flags & SDVO_OUTPUT_MASK) == 0) {
2390 unsigned char bytes[2]; 2293 unsigned char bytes[2];
2391 2294
2392 sdvo_priv->controlled_output = 0; 2295 intel_sdvo->controlled_output = 0;
2393 memcpy(bytes, &sdvo_priv->caps.output_flags, 2); 2296 memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
2394 DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", 2297 DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
2395 SDVO_NAME(sdvo_priv), 2298 SDVO_NAME(intel_sdvo),
2396 bytes[0], bytes[1]); 2299 bytes[0], bytes[1]);
2397 return false; 2300 return false;
2398 } 2301 }
2399 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 2302 intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
2400 2303
2401 return true; 2304 return true;
2402} 2305}
2403 2306
2404static void intel_sdvo_tv_create_property(struct drm_connector *connector, int type) 2307static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2308 struct intel_sdvo_connector *intel_sdvo_connector,
2309 int type)
2405{ 2310{
2406 struct drm_encoder *encoder = intel_attached_encoder(connector); 2311 struct drm_device *dev = intel_sdvo->base.enc.dev;
2407 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
2408 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2409 struct intel_connector *intel_connector = to_intel_connector(connector);
2410 struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
2411 struct intel_sdvo_tv_format format; 2312 struct intel_sdvo_tv_format format;
2412 uint32_t format_map, i; 2313 uint32_t format_map, i;
2413 uint8_t status;
2414 2314
2415 intel_sdvo_set_target_output(intel_encoder, type); 2315 if (!intel_sdvo_set_target_output(intel_sdvo, type))
2316 return false;
2416 2317
2417 intel_sdvo_write_cmd(intel_encoder, 2318 if (!intel_sdvo_get_value(intel_sdvo,
2418 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); 2319 SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
2419 status = intel_sdvo_read_response(intel_encoder, 2320 &format, sizeof(format)))
2420 &format, sizeof(format)); 2321 return false;
2421 if (status != SDVO_CMD_STATUS_SUCCESS)
2422 return;
2423 2322
2424 memcpy(&format_map, &format, sizeof(format) > sizeof(format_map) ? 2323 memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
2425 sizeof(format_map) : sizeof(format));
2426 2324
2427 if (format_map == 0) 2325 if (format_map == 0)
2428 return; 2326 return false;
2429 2327
2430 sdvo_connector->format_supported_num = 0; 2328 intel_sdvo_connector->format_supported_num = 0;
2431 for (i = 0 ; i < TV_FORMAT_NUM; i++) 2329 for (i = 0 ; i < TV_FORMAT_NUM; i++)
2432 if (format_map & (1 << i)) { 2330 if (format_map & (1 << i))
2433 sdvo_connector->tv_format_supported 2331 intel_sdvo_connector->tv_format_supported[intel_sdvo_connector->format_supported_num++] = i;
2434 [sdvo_connector->format_supported_num++] =
2435 tv_format_names[i];
2436 }
2437 2332
2438 2333
2439 sdvo_connector->tv_format_property = 2334 intel_sdvo_connector->tv_format =
2440 drm_property_create( 2335 drm_property_create(dev, DRM_MODE_PROP_ENUM,
2441 connector->dev, DRM_MODE_PROP_ENUM, 2336 "mode", intel_sdvo_connector->format_supported_num);
2442 "mode", sdvo_connector->format_supported_num); 2337 if (!intel_sdvo_connector->tv_format)
2338 return false;
2443 2339
2444 for (i = 0; i < sdvo_connector->format_supported_num; i++) 2340 for (i = 0; i < intel_sdvo_connector->format_supported_num; i++)
2445 drm_property_add_enum( 2341 drm_property_add_enum(
2446 sdvo_connector->tv_format_property, i, 2342 intel_sdvo_connector->tv_format, i,
2447 i, sdvo_connector->tv_format_supported[i]); 2343 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2448 2344
2449 sdvo_priv->tv_format_name = sdvo_connector->tv_format_supported[0]; 2345 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
2450 drm_connector_attach_property( 2346 drm_connector_attach_property(&intel_sdvo_connector->base.base,
2451 connector, sdvo_connector->tv_format_property, 0); 2347 intel_sdvo_connector->tv_format, 0);
2348 return true;
2452 2349
2453} 2350}
2454 2351
2455static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2352#define ENHANCEMENT(name, NAME) do { \
2353 if (enhancements.name) { \
2354 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
2355 !intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
2356 return false; \
2357 intel_sdvo_connector->max_##name = data_value[0]; \
2358 intel_sdvo_connector->cur_##name = response; \
2359 intel_sdvo_connector->name = \
2360 drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
2361 if (!intel_sdvo_connector->name) return false; \
2362 intel_sdvo_connector->name->values[0] = 0; \
2363 intel_sdvo_connector->name->values[1] = data_value[0]; \
2364 drm_connector_attach_property(connector, \
2365 intel_sdvo_connector->name, \
2366 intel_sdvo_connector->cur_##name); \
2367 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
2368 data_value[0], data_value[1], response); \
2369 } \
2370} while (0)
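
ENHANCEMENT is wrapped in do { ... } while (0) so its multi-statement body behaves as a single statement in any context, including an un-braced if/else; note that its embedded "return false" exits the calling function, which is why the macro is only usable inside the create_enhance_property helpers. The classic idiom in isolation:

    #include <stdio.h>

    /* Without do/while (0) the two statements would split apart in an
     * un-braced if/else; with it, the expansion is one statement. */
    #define LOG_AND_COUNT(msg, counter) do { \
        printf("%s\n", (msg));               \
        (counter)++;                         \
    } while (0)

    int main(void)
    {
        int n = 0;
        if (n == 0)
            LOG_AND_COUNT("first branch", n);  /* expands safely here */
        else
            LOG_AND_COUNT("other branch", n);
        return n == 1 ? 0 : 1;
    }
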
2371
2372static bool
2373intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2374 struct intel_sdvo_connector *intel_sdvo_connector,
2375 struct intel_sdvo_enhancements_reply enhancements)
2456{ 2376{
2457 struct drm_encoder *encoder = intel_attached_encoder(connector); 2377 struct drm_device *dev = intel_sdvo->base.enc.dev;
2458 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 2378 struct drm_connector *connector = &intel_sdvo_connector->base.base;
2459 struct intel_connector *intel_connector = to_intel_connector(connector);
2460 struct intel_sdvo_connector *sdvo_priv = intel_connector->dev_priv;
2461 struct intel_sdvo_enhancements_reply sdvo_data;
2462 struct drm_device *dev = connector->dev;
2463 uint8_t status;
2464 uint16_t response, data_value[2]; 2379 uint16_t response, data_value[2];
2465 2380
2466 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2381 /* When horizontal overscan is supported, add the left/right property */
2467 NULL, 0); 2382 if (enhancements.overscan_h) {
2468 status = intel_sdvo_read_response(intel_encoder, &sdvo_data, 2383 if (!intel_sdvo_get_value(intel_sdvo,
2469 sizeof(sdvo_data)); 2384 SDVO_CMD_GET_MAX_OVERSCAN_H,
2470 if (status != SDVO_CMD_STATUS_SUCCESS) { 2385 &data_value, 4))
2471 DRM_DEBUG_KMS(" incorrect response is returned\n"); 2386 return false;
2472 return; 2387
2388 if (!intel_sdvo_get_value(intel_sdvo,
2389 SDVO_CMD_GET_OVERSCAN_H,
2390 &response, 2))
2391 return false;
2392
2393 intel_sdvo_connector->max_hscan = data_value[0];
2394 intel_sdvo_connector->left_margin = data_value[0] - response;
2395 intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
2396 intel_sdvo_connector->left =
2397 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2398 "left_margin", 2);
2399 if (!intel_sdvo_connector->left)
2400 return false;
2401
2402 intel_sdvo_connector->left->values[0] = 0;
2403 intel_sdvo_connector->left->values[1] = data_value[0];
2404 drm_connector_attach_property(connector,
2405 intel_sdvo_connector->left,
2406 intel_sdvo_connector->left_margin);
2407
2408 intel_sdvo_connector->right =
2409 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2410 "right_margin", 2);
2411 if (!intel_sdvo_connector->right)
2412 return false;
2413
2414 intel_sdvo_connector->right->values[0] = 0;
2415 intel_sdvo_connector->right->values[1] = data_value[0];
2416 drm_connector_attach_property(connector,
2417 intel_sdvo_connector->right,
2418 intel_sdvo_connector->right_margin);
2419 DRM_DEBUG_KMS("h_overscan: max %d, "
2420 "default %d, current %d\n",
2421 data_value[0], data_value[1], response);
2473 } 2422 }
2474 response = *((uint16_t *)&sdvo_data); 2423
2475 if (!response) { 2424 if (enhancements.overscan_v) {
2476 DRM_DEBUG_KMS("No enhancement is supported\n"); 2425 if (!intel_sdvo_get_value(intel_sdvo,
2477 return; 2426 SDVO_CMD_GET_MAX_OVERSCAN_V,
2427 &data_value, 4))
2428 return false;
2429
2430 if (!intel_sdvo_get_value(intel_sdvo,
2431 SDVO_CMD_GET_OVERSCAN_V,
2432 &response, 2))
2433 return false;
2434
2435 intel_sdvo_connector->max_vscan = data_value[0];
2436 intel_sdvo_connector->top_margin = data_value[0] - response;
2437 intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
2438 intel_sdvo_connector->top =
2439 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2440 "top_margin", 2);
2441 if (!intel_sdvo_connector->top)
2442 return false;
2443
2444 intel_sdvo_connector->top->values[0] = 0;
2445 intel_sdvo_connector->top->values[1] = data_value[0];
2446 drm_connector_attach_property(connector,
2447 intel_sdvo_connector->top,
2448 intel_sdvo_connector->top_margin);
2449
2450 intel_sdvo_connector->bottom =
2451 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2452 "bottom_margin", 2);
2453 if (!intel_sdvo_connector->bottom)
2454 return false;
2455
2456 intel_sdvo_connector->bottom->values[0] = 0;
2457 intel_sdvo_connector->bottom->values[1] = data_value[0];
2458 drm_connector_attach_property(connector,
2459 intel_sdvo_connector->bottom,
2460 intel_sdvo_connector->bottom_margin);
2461 DRM_DEBUG_KMS("v_overscan: max %d, "
2462 "default %d, current %d\n",
2463 data_value[0], data_value[1], response);
2478 } 2464 }
2479 if (IS_TV(sdvo_priv)) { 2465
2480 /* when horizontal overscan is supported, Add the left/right 2466 ENHANCEMENT(hpos, HPOS);
2481 * property 2467 ENHANCEMENT(vpos, VPOS);
2482 */ 2468 ENHANCEMENT(saturation, SATURATION);
2483 if (sdvo_data.overscan_h) { 2469 ENHANCEMENT(contrast, CONTRAST);
2484 intel_sdvo_write_cmd(intel_encoder, 2470 ENHANCEMENT(hue, HUE);
2485 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2471 ENHANCEMENT(sharpness, SHARPNESS);
2486 status = intel_sdvo_read_response(intel_encoder, 2472 ENHANCEMENT(brightness, BRIGHTNESS);
2487 &data_value, 4); 2473 ENHANCEMENT(flicker_filter, FLICKER_FILTER);
2488 if (status != SDVO_CMD_STATUS_SUCCESS) { 2474 ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
2489 DRM_DEBUG_KMS("Incorrect SDVO max " 2475 ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
2490 "h_overscan\n"); 2476 ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
2491 return; 2477 ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
2492 } 2478
2493 intel_sdvo_write_cmd(intel_encoder, 2479 if (enhancements.dot_crawl) {
2494 SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2480 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
2495 status = intel_sdvo_read_response(intel_encoder, 2481 return false;
2496 &response, 2); 2482
2497 if (status != SDVO_CMD_STATUS_SUCCESS) { 2483 intel_sdvo_connector->max_dot_crawl = 1;
2498 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2484 intel_sdvo_connector->cur_dot_crawl = response & 0x1;
2499 return; 2485 intel_sdvo_connector->dot_crawl =
2500 } 2486 drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
2501 sdvo_priv->max_hscan = data_value[0]; 2487 if (!intel_sdvo_connector->dot_crawl)
2502 sdvo_priv->left_margin = data_value[0] - response; 2488 return false;
2503 sdvo_priv->right_margin = sdvo_priv->left_margin; 2489
2504 sdvo_priv->left_property = 2490 intel_sdvo_connector->dot_crawl->values[0] = 0;
2505 drm_property_create(dev, DRM_MODE_PROP_RANGE, 2491 intel_sdvo_connector->dot_crawl->values[1] = 1;
2506 "left_margin", 2); 2492 drm_connector_attach_property(connector,
2507 sdvo_priv->left_property->values[0] = 0; 2493 intel_sdvo_connector->dot_crawl,
2508 sdvo_priv->left_property->values[1] = data_value[0]; 2494 intel_sdvo_connector->cur_dot_crawl);
2509 drm_connector_attach_property(connector, 2495 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
2510 sdvo_priv->left_property,
2511 sdvo_priv->left_margin);
2512 sdvo_priv->right_property =
2513 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2514 "right_margin", 2);
2515 sdvo_priv->right_property->values[0] = 0;
2516 sdvo_priv->right_property->values[1] = data_value[0];
2517 drm_connector_attach_property(connector,
2518 sdvo_priv->right_property,
2519 sdvo_priv->right_margin);
2520 DRM_DEBUG_KMS("h_overscan: max %d, "
2521 "default %d, current %d\n",
2522 data_value[0], data_value[1], response);
2523 }
2524 if (sdvo_data.overscan_v) {
2525 intel_sdvo_write_cmd(intel_encoder,
2526 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2527 status = intel_sdvo_read_response(intel_encoder,
2528 &data_value, 4);
2529 if (status != SDVO_CMD_STATUS_SUCCESS) {
2530 DRM_DEBUG_KMS("Incorrect SDVO max "
2531 "v_overscan\n");
2532 return;
2533 }
2534 intel_sdvo_write_cmd(intel_encoder,
2535 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2536 status = intel_sdvo_read_response(intel_encoder,
2537 &response, 2);
2538 if (status != SDVO_CMD_STATUS_SUCCESS) {
2539 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
2540 return;
2541 }
2542 sdvo_priv->max_vscan = data_value[0];
2543 sdvo_priv->top_margin = data_value[0] - response;
2544 sdvo_priv->bottom_margin = sdvo_priv->top_margin;
2545 sdvo_priv->top_property =
2546 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2547 "top_margin", 2);
2548 sdvo_priv->top_property->values[0] = 0;
2549 sdvo_priv->top_property->values[1] = data_value[0];
2550 drm_connector_attach_property(connector,
2551 sdvo_priv->top_property,
2552 sdvo_priv->top_margin);
2553 sdvo_priv->bottom_property =
2554 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2555 "bottom_margin", 2);
2556 sdvo_priv->bottom_property->values[0] = 0;
2557 sdvo_priv->bottom_property->values[1] = data_value[0];
2558 drm_connector_attach_property(connector,
2559 sdvo_priv->bottom_property,
2560 sdvo_priv->bottom_margin);
2561 DRM_DEBUG_KMS("v_overscan: max %d, "
2562 "default %d, current %d\n",
2563 data_value[0], data_value[1], response);
2564 }
2565 if (sdvo_data.position_h) {
2566 intel_sdvo_write_cmd(intel_encoder,
2567 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2568 status = intel_sdvo_read_response(intel_encoder,
2569 &data_value, 4);
2570 if (status != SDVO_CMD_STATUS_SUCCESS) {
2571 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2572 return;
2573 }
2574 intel_sdvo_write_cmd(intel_encoder,
2575 SDVO_CMD_GET_POSITION_H, NULL, 0);
2576 status = intel_sdvo_read_response(intel_encoder,
2577 &response, 2);
2578 if (status != SDVO_CMD_STATUS_SUCCESS) {
2579 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
2580 return;
2581 }
2582 sdvo_priv->max_hpos = data_value[0];
2583 sdvo_priv->cur_hpos = response;
2584 sdvo_priv->hpos_property =
2585 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2586 "hpos", 2);
2587 sdvo_priv->hpos_property->values[0] = 0;
2588 sdvo_priv->hpos_property->values[1] = data_value[0];
2589 drm_connector_attach_property(connector,
2590 sdvo_priv->hpos_property,
2591 sdvo_priv->cur_hpos);
2592 DRM_DEBUG_KMS("h_position: max %d, "
2593 "default %d, current %d\n",
2594 data_value[0], data_value[1], response);
2595 }
2596 if (sdvo_data.position_v) {
2597 intel_sdvo_write_cmd(intel_encoder,
2598 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2599 status = intel_sdvo_read_response(intel_encoder,
2600 &data_value, 4);
2601 if (status != SDVO_CMD_STATUS_SUCCESS) {
2602 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2603 return;
2604 }
2605 intel_sdvo_write_cmd(intel_encoder,
2606 SDVO_CMD_GET_POSITION_V, NULL, 0);
2607 status = intel_sdvo_read_response(intel_encoder,
2608 &response, 2);
2609 if (status != SDVO_CMD_STATUS_SUCCESS) {
2610 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
2611 return;
2612 }
2613 sdvo_priv->max_vpos = data_value[0];
2614 sdvo_priv->cur_vpos = response;
2615 sdvo_priv->vpos_property =
2616 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2617 "vpos", 2);
2618 sdvo_priv->vpos_property->values[0] = 0;
2619 sdvo_priv->vpos_property->values[1] = data_value[0];
2620 drm_connector_attach_property(connector,
2621 sdvo_priv->vpos_property,
2622 sdvo_priv->cur_vpos);
2623 DRM_DEBUG_KMS("v_position: max %d, "
2624 "default %d, current %d\n",
2625 data_value[0], data_value[1], response);
2626 }
2627 if (sdvo_data.saturation) {
2628 intel_sdvo_write_cmd(intel_encoder,
2629 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2630 status = intel_sdvo_read_response(intel_encoder,
2631 &data_value, 4);
2632 if (status != SDVO_CMD_STATUS_SUCCESS) {
2633 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2634 return;
2635 }
2636 intel_sdvo_write_cmd(intel_encoder,
2637 SDVO_CMD_GET_SATURATION, NULL, 0);
2638 status = intel_sdvo_read_response(intel_encoder,
2639 &response, 2);
2640 if (status != SDVO_CMD_STATUS_SUCCESS) {
2641 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
2642 return;
2643 }
2644 sdvo_priv->max_saturation = data_value[0];
2645 sdvo_priv->cur_saturation = response;
2646 sdvo_priv->saturation_property =
2647 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2648 "saturation", 2);
2649 sdvo_priv->saturation_property->values[0] = 0;
2650 sdvo_priv->saturation_property->values[1] =
2651 data_value[0];
2652 drm_connector_attach_property(connector,
2653 sdvo_priv->saturation_property,
2654 sdvo_priv->cur_saturation);
2655 DRM_DEBUG_KMS("saturation: max %d, "
2656 "default %d, current %d\n",
2657 data_value[0], data_value[1], response);
2658 }
2659 if (sdvo_data.contrast) {
2660 intel_sdvo_write_cmd(intel_encoder,
2661 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2662 status = intel_sdvo_read_response(intel_encoder,
2663 &data_value, 4);
2664 if (status != SDVO_CMD_STATUS_SUCCESS) {
2665 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2666 return;
2667 }
2668 intel_sdvo_write_cmd(intel_encoder,
2669 SDVO_CMD_GET_CONTRAST, NULL, 0);
2670 status = intel_sdvo_read_response(intel_encoder,
2671 &response, 2);
2672 if (status != SDVO_CMD_STATUS_SUCCESS) {
2673 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
2674 return;
2675 }
2676 sdvo_priv->max_contrast = data_value[0];
2677 sdvo_priv->cur_contrast = response;
2678 sdvo_priv->contrast_property =
2679 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2680 "contrast", 2);
2681 sdvo_priv->contrast_property->values[0] = 0;
2682 sdvo_priv->contrast_property->values[1] = data_value[0];
2683 drm_connector_attach_property(connector,
2684 sdvo_priv->contrast_property,
2685 sdvo_priv->cur_contrast);
2686 DRM_DEBUG_KMS("contrast: max %d, "
2687 "default %d, current %d\n",
2688 data_value[0], data_value[1], response);
2689 }
2690 if (sdvo_data.hue) {
2691 intel_sdvo_write_cmd(intel_encoder,
2692 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2693 status = intel_sdvo_read_response(intel_encoder,
2694 &data_value, 4);
2695 if (status != SDVO_CMD_STATUS_SUCCESS) {
2696 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2697 return;
2698 }
2699 intel_sdvo_write_cmd(intel_encoder,
2700 SDVO_CMD_GET_HUE, NULL, 0);
2701 status = intel_sdvo_read_response(intel_encoder,
2702 &response, 2);
2703 if (status != SDVO_CMD_STATUS_SUCCESS) {
2704 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
2705 return;
2706 }
2707 sdvo_priv->max_hue = data_value[0];
2708 sdvo_priv->cur_hue = response;
2709 sdvo_priv->hue_property =
2710 drm_property_create(dev, DRM_MODE_PROP_RANGE,
2711 "hue", 2);
2712 sdvo_priv->hue_property->values[0] = 0;
2713 sdvo_priv->hue_property->values[1] =
2714 data_value[0];
2715 drm_connector_attach_property(connector,
2716 sdvo_priv->hue_property,
2717 sdvo_priv->cur_hue);
2718 DRM_DEBUG_KMS("hue: max %d, default %d, current %d\n",
2719 data_value[0], data_value[1], response);
2720 }
2721 } 2496 }
2722 if (IS_TV(sdvo_priv) || IS_LVDS(sdvo_priv)) { 2497
2723 if (sdvo_data.brightness) { 2498 return true;
2724 intel_sdvo_write_cmd(intel_encoder, 2499}
2725 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2500
2726 status = intel_sdvo_read_response(intel_encoder, 2501static bool
2727 &data_value, 4); 2502intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
2728 if (status != SDVO_CMD_STATUS_SUCCESS) { 2503 struct intel_sdvo_connector *intel_sdvo_connector,
2729 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2504 struct intel_sdvo_enhancements_reply enhancements)
2730 return; 2505{
2731 } 2506 struct drm_device *dev = intel_sdvo->base.enc.dev;
2732 intel_sdvo_write_cmd(intel_encoder, 2507 struct drm_connector *connector = &intel_sdvo_connector->base.base;
2733 SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2508 uint16_t response, data_value[2];
2734 status = intel_sdvo_read_response(intel_encoder, 2509
2735 &response, 2); 2510 ENHANCEMENT(brightness, BRIGHTNESS);
2736 if (status != SDVO_CMD_STATUS_SUCCESS) { 2511
2737 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2512 return true;
2738 return; 2513}
2739 } 2514#undef ENHANCEMENT
2740 sdvo_priv->max_brightness = data_value[0]; 2515
2741 sdvo_priv->cur_brightness = response; 2516static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
2742 sdvo_priv->brightness_property = 2517 struct intel_sdvo_connector *intel_sdvo_connector)
2743 drm_property_create(dev, DRM_MODE_PROP_RANGE, 2518{
2744 "brightness", 2); 2519 union {
2745 sdvo_priv->brightness_property->values[0] = 0; 2520 struct intel_sdvo_enhancements_reply reply;
2746 sdvo_priv->brightness_property->values[1] = 2521 uint16_t response;
2747 data_value[0]; 2522 } enhancements;
2748 drm_connector_attach_property(connector, 2523
2749 sdvo_priv->brightness_property, 2524 if (!intel_sdvo_get_value(intel_sdvo,
2750 sdvo_priv->cur_brightness); 2525 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2751 DRM_DEBUG_KMS("brightness: max %d, " 2526 &enhancements, sizeof(enhancements)))
2752 "default %d, current %d\n", 2527 return false;
2753 data_value[0], data_value[1], response); 2528
2754 } 2529 if (enhancements.response == 0) {
2530 DRM_DEBUG_KMS("No enhancement is supported\n");
2531 return true;
2755 } 2532 }
2756 return; 2533
2534 if (IS_TV(intel_sdvo_connector))
2535 return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
2536 else if (IS_LVDS(intel_sdvo_connector))
2537 return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
2538 else
2539 return true;
2540
2757} 2541}
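
The union declared in intel_sdvo_create_enhance_property overlays the packed bitfield reply with a plain uint16_t, so "is any enhancement supported?" becomes a single integer compare, the same test the old code performed with the pointer cast *((uint16_t *)&sdvo_data) but without punning through a pointer. A small sketch of the trick (abridged flag set):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct enh_reply {               /* abridged: the real reply has 16 flags */
        unsigned int flicker_filter:1;
        unsigned int brightness:1;
        unsigned int overscan_h:1;
        unsigned int overscan_v:1;
    } __attribute__((packed));

    int main(void)
    {
        union {
            struct enh_reply reply;  /* the named flags...            */
            uint16_t response;       /* ...and the same bytes as one  */
        } enh;                       /* integer, for a combined test  */

        memset(&enh, 0, sizeof(enh));
        enh.reply.brightness = 1;

        if (enh.response == 0)       /* one compare instead of N bit tests */
            printf("no enhancement supported\n");
        else
            printf("some enhancement bit set (0x%04x)\n",
                   (unsigned)enh.response);
        return 0;
    }
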
2758 2542
2759bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) 2543bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2760{ 2544{
2761 struct drm_i915_private *dev_priv = dev->dev_private; 2545 struct drm_i915_private *dev_priv = dev->dev_private;
2762 struct intel_encoder *intel_encoder; 2546 struct intel_encoder *intel_encoder;
2763 struct intel_sdvo_priv *sdvo_priv; 2547 struct intel_sdvo *intel_sdvo;
2764 u8 ch[0x40]; 2548 u8 ch[0x40];
2765 int i; 2549 int i;
2766 u32 i2c_reg, ddc_reg, analog_ddc_reg; 2550 u32 i2c_reg, ddc_reg, analog_ddc_reg;
2767 2551
2768 intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2552 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
2769 if (!intel_encoder) { 2553 if (!intel_sdvo)
2770 return false; 2554 return false;
2771 }
2772 2555
2773 sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1); 2556 intel_sdvo->sdvo_reg = sdvo_reg;
2774 sdvo_priv->sdvo_reg = sdvo_reg;
2775 2557
2776 intel_encoder->dev_priv = sdvo_priv; 2558 intel_encoder = &intel_sdvo->base;
2777 intel_encoder->type = INTEL_OUTPUT_SDVO; 2559 intel_encoder->type = INTEL_OUTPUT_SDVO;
2778 2560
2779 if (HAS_PCH_SPLIT(dev)) { 2561 if (HAS_PCH_SPLIT(dev)) {
@@ -2795,14 +2577,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2795 if (!intel_encoder->i2c_bus) 2577 if (!intel_encoder->i2c_bus)
2796 goto err_inteloutput; 2578 goto err_inteloutput;
2797 2579
2798 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg); 2580 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
2799 2581
2800 /* Save the bit-banging i2c functionality for use by the DDC wrapper */ 2582 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
2801 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality; 2583 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
2802 2584
2803 /* Read the regs to test if we can talk to the device */ 2585 /* Read the regs to test if we can talk to the device */
2804 for (i = 0; i < 0x40; i++) { 2586 for (i = 0; i < 0x40; i++) {
2805 if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) { 2587 if (!intel_sdvo_read_byte(intel_sdvo, i, &ch[i])) {
2806 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2588 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2807 IS_SDVOB(sdvo_reg) ? 'B' : 'C'); 2589 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2808 goto err_i2c; 2590 goto err_i2c;
@@ -2812,17 +2594,16 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2812 /* setup the DDC bus. */ 2594 /* setup the DDC bus. */
2813 if (IS_SDVOB(sdvo_reg)) { 2595 if (IS_SDVOB(sdvo_reg)) {
2814 intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS"); 2596 intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOB DDC BUS");
2815 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, 2597 intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
2816 "SDVOB/VGA DDC BUS"); 2598 "SDVOB/VGA DDC BUS");
2817 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2599 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2818 } else { 2600 } else {
2819 intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS"); 2601 intel_encoder->ddc_bus = intel_i2c_create(dev, ddc_reg, "SDVOC DDC BUS");
2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg, 2602 intel_sdvo->analog_ddc_bus = intel_i2c_create(dev, analog_ddc_reg,
2821 "SDVOC/VGA DDC BUS"); 2603 "SDVOC/VGA DDC BUS");
2822 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2604 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2823 } 2605 }
2824 2606 if (intel_encoder->ddc_bus == NULL || intel_sdvo->analog_ddc_bus == NULL)
2825 if (intel_encoder->ddc_bus == NULL)
2826 goto err_i2c; 2607 goto err_i2c;
2827 2608
2828 /* Wrap with our custom algo which switches to DDC mode */ 2609 /* Wrap with our custom algo which switches to DDC mode */
@@ -2833,53 +2614,56 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2833 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs); 2614 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
2834 2615
2835 /* In the default case, sdvo lvds is false */ 2616
2836 intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps); 2617 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2618 goto err_enc;
2837 2619
2838 if (intel_sdvo_output_setup(intel_encoder, 2620 if (intel_sdvo_output_setup(intel_sdvo,
2839 sdvo_priv->caps.output_flags) != true) { 2621 intel_sdvo->caps.output_flags) != true) {
2840 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2622 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2841 IS_SDVOB(sdvo_reg) ? 'B' : 'C'); 2623 IS_SDVOB(sdvo_reg) ? 'B' : 'C');
2842 goto err_i2c; 2624 goto err_enc;
2843 } 2625 }
2844 2626
2845 intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg); 2627 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
2846 2628
2847 /* Set the input timing to the screen. Always assume input 0. */ 2629
2848 intel_sdvo_set_target_input(intel_encoder, true, false); 2630 if (!intel_sdvo_set_target_input(intel_sdvo))
2849 2631 goto err_enc;
2850 intel_sdvo_get_input_pixel_clock_range(intel_encoder,
2851 &sdvo_priv->pixel_clock_min,
2852 &sdvo_priv->pixel_clock_max);
2853 2632
2633 if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
2634 &intel_sdvo->pixel_clock_min,
2635 &intel_sdvo->pixel_clock_max))
2636 goto err_enc;
2854 2637
2855 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " 2638 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
2856 "clock range %dMHz - %dMHz, " 2639 "clock range %dMHz - %dMHz, "
2857 "input 1: %c, input 2: %c, " 2640 "input 1: %c, input 2: %c, "
2858 "output 1: %c, output 2: %c\n", 2641 "output 1: %c, output 2: %c\n",
2859 SDVO_NAME(sdvo_priv), 2642 SDVO_NAME(intel_sdvo),
2860 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, 2643 intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
2861 sdvo_priv->caps.device_rev_id, 2644 intel_sdvo->caps.device_rev_id,
2862 sdvo_priv->pixel_clock_min / 1000, 2645 intel_sdvo->pixel_clock_min / 1000,
2863 sdvo_priv->pixel_clock_max / 1000, 2646 intel_sdvo->pixel_clock_max / 1000,
2864 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', 2647 (intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
2865 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', 2648 (intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
2866 /* check currently supported outputs */ 2649 /* check currently supported outputs */
2867 sdvo_priv->caps.output_flags & 2650 intel_sdvo->caps.output_flags &
2868 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', 2651 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
2869 sdvo_priv->caps.output_flags & 2652 intel_sdvo->caps.output_flags &
2870 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 2653 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
2871
2872 return true; 2654 return true;
2873 2655
2656err_enc:
2657 drm_encoder_cleanup(&intel_encoder->enc);
2874err_i2c: 2658err_i2c:
2875 if (sdvo_priv->analog_ddc_bus != NULL) 2659 if (intel_sdvo->analog_ddc_bus != NULL)
2876 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 2660 intel_i2c_destroy(intel_sdvo->analog_ddc_bus);
2877 if (intel_encoder->ddc_bus != NULL) 2661 if (intel_encoder->ddc_bus != NULL)
2878 intel_i2c_destroy(intel_encoder->ddc_bus); 2662 intel_i2c_destroy(intel_encoder->ddc_bus);
2879 if (intel_encoder->i2c_bus != NULL) 2663 if (intel_encoder->i2c_bus != NULL)
2880 intel_i2c_destroy(intel_encoder->i2c_bus); 2664 intel_i2c_destroy(intel_encoder->i2c_bus);
2881err_inteloutput: 2665err_inteloutput:
2882 kfree(intel_encoder); 2666 kfree(intel_sdvo);
2883 2667
2884 return false; 2668 return false;
2885} 2669}
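
The error paths in intel_sdvo_init follow the kernel's stacked-goto convention: each failure jumps to the label that releases exactly what has been acquired so far, and the labels (err_enc, err_i2c, err_inteloutput) run in reverse order of acquisition. A generic sketch of the ladder:

    #include <stdlib.h>

    static int init_device(void)
    {
        char *a, *b;

        a = malloc(16);
        if (!a)
            goto err_nothing;       /* nothing acquired yet */

        b = malloc(16);
        if (!b)
            goto err_a;             /* undo only 'a' */

        if (0 /* imagine a capability probe failing here */)
            goto err_b;             /* undo 'b', then fall into err_a */

        free(b);                    /* success path for this demo */
        free(a);
        return 0;

    err_b:
        free(b);
    err_a:                          /* labels run in reverse order of   */
        free(a);                    /* acquisition, like err_enc ->     */
    err_nothing:                    /* err_i2c -> err_inteloutput above */
        return -1;
    }

    int main(void)
    {
        return init_device();
    }
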
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index ba5cdf8ae40b..a386b022e538 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -312,7 +312,7 @@ struct intel_sdvo_set_target_input_args {
 # define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
 
 #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-/** 5 bytes of bit flags for TV formats shared by all TV format functions */
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
 struct intel_sdvo_tv_format {
 	unsigned int ntsc_m:1;
 	unsigned int ntsc_j:1;
@@ -596,32 +596,32 @@ struct intel_sdvo_enhancements_reply {
 	unsigned int overscan_h:1;
 
 	unsigned int overscan_v:1;
-	unsigned int position_h:1;
-	unsigned int position_v:1;
+	unsigned int hpos:1;
+	unsigned int vpos:1;
 	unsigned int sharpness:1;
 	unsigned int dot_crawl:1;
 	unsigned int dither:1;
-	unsigned int max_tv_chroma_filter:1;
-	unsigned int max_tv_luma_filter:1;
+	unsigned int tv_chroma_filter:1;
+	unsigned int tv_luma_filter:1;
 } __attribute__((packed));
 
 /* Picture enhancement limits below are dependent on the current TV format,
  * and thus need to be queried and set after it.
  */
-#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
-#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
-#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
 #define SDVO_CMD_GET_MAX_SATURATION 0x55
 #define SDVO_CMD_GET_MAX_HUE 0x58
 #define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
 #define SDVO_CMD_GET_MAX_CONTRAST 0x5e
 #define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
 #define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
-#define SDVO_CMD_GET_MAX_POSITION_H 0x67
-#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
-#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
-#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
-#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
 struct intel_sdvo_enhancement_limits_reply {
 	u16 max_value;
 	u16 default_value;
@@ -638,10 +638,10 @@ struct intel_sdvo_enhancement_limits_reply {
 
 #define SDVO_CMD_GET_FLICKER_FILTER 0x4e
 #define SDVO_CMD_SET_FLICKER_FILTER 0x4f
-#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
-#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
-#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
-#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
 #define SDVO_CMD_GET_SATURATION 0x56
 #define SDVO_CMD_SET_SATURATION 0x57
 #define SDVO_CMD_GET_HUE 0x59
@@ -654,16 +654,16 @@ struct intel_sdvo_enhancement_limits_reply {
 #define SDVO_CMD_SET_OVERSCAN_H 0x63
 #define SDVO_CMD_GET_OVERSCAN_V 0x65
 #define SDVO_CMD_SET_OVERSCAN_V 0x66
-#define SDVO_CMD_GET_POSITION_H 0x68
-#define SDVO_CMD_SET_POSITION_H 0x69
-#define SDVO_CMD_GET_POSITION_V 0x6b
-#define SDVO_CMD_SET_POSITION_V 0x6c
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
 #define SDVO_CMD_GET_SHARPNESS 0x6e
 #define SDVO_CMD_SET_SHARPNESS 0x6f
-#define SDVO_CMD_GET_TV_CHROMA 0x75
-#define SDVO_CMD_SET_TV_CHROMA 0x76
-#define SDVO_CMD_GET_TV_LUMA 0x78
-#define SDVO_CMD_SET_TV_LUMA 0x79
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
 struct intel_sdvo_enhancements_arg {
 	u16 value;
 }__attribute__((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index cc3726a4a1cb..c671f60ce80b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -44,7 +44,9 @@ enum tv_margin {
 };
 
 /** Private structure for the integrated TV support */
-struct intel_tv_priv {
+struct intel_tv {
+	struct intel_encoder base;
+
 	int type;
 	char *tv_format;
 	int margin[4];
@@ -896,6 +898,11 @@ static const struct tv_mode tv_modes[] = {
 	},
 };
 
+static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+{
+	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+}
+
 static void
 intel_tv_dpms(struct drm_encoder *encoder, int mode)
 {
@@ -929,19 +936,17 @@ intel_tv_mode_lookup (char *tv_format)
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_encoder *intel_encoder)
+intel_tv_mode_find (struct intel_tv *intel_tv)
 {
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-
-	return intel_tv_mode_lookup(tv_priv->tv_format);
+	return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
 intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -957,8 +962,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_mode_config *drm_config = &dev->mode_config;
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	struct drm_encoder *other_encoder;
 
 	if (!tv_mode)
@@ -983,9 +988,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	u32 tv_ctl;
 	u32 hctl1, hctl2, hctl3;
 	u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1001,7 +1005,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	tv_ctl = I915_READ(TV_CTL);
 	tv_ctl &= TV_CTL_SAVE;
 
-	switch (tv_priv->type) {
+	switch (intel_tv->type) {
 	default:
 	case DRM_MODE_CONNECTOR_Unknown:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1154,11 +1158,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
 	/* Wait for vblank for the disable to take effect */
 	if (!IS_I9XX(dev))
-		intel_wait_for_vblank(dev);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
 	/* Wait for vblank for the disable to take effect. */
-	intel_wait_for_vblank(dev);
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	/* Filter ctl must be set before TV_WIN_SIZE */
 	I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1168,12 +1172,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	else
 		ysize = 2*tv_mode->nbr_end + 1;
 
-	xpos += tv_priv->margin[TV_MARGIN_LEFT];
-	ypos += tv_priv->margin[TV_MARGIN_TOP];
-	xsize -= (tv_priv->margin[TV_MARGIN_LEFT] +
-		  tv_priv->margin[TV_MARGIN_RIGHT]);
-	ysize -= (tv_priv->margin[TV_MARGIN_TOP] +
-		  tv_priv->margin[TV_MARGIN_BOTTOM]);
+	xpos += intel_tv->margin[TV_MARGIN_LEFT];
+	ypos += intel_tv->margin[TV_MARGIN_TOP];
+	xsize -= (intel_tv->margin[TV_MARGIN_LEFT] +
+		  intel_tv->margin[TV_MARGIN_RIGHT]);
+	ysize -= (intel_tv->margin[TV_MARGIN_TOP] +
+		  intel_tv->margin[TV_MARGIN_BOTTOM]);
 	I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
 	I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
 
@@ -1222,9 +1226,9 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = &intel_tv->base.enc;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
@@ -1263,11 +1267,15 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 			DAC_C_0_7_V);
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
-	intel_wait_for_vblank(dev);
+	POSTING_READ(TV_DAC);
+	msleep(20);
+
 	tv_dac = I915_READ(TV_DAC);
 	I915_WRITE(TV_DAC, save_tv_dac);
 	I915_WRITE(TV_CTL, save_tv_ctl);
-	intel_wait_for_vblank(dev);
+	POSTING_READ(TV_CTL);
+	msleep(20);
+
 	/*
 	 *	A	B	C
 	 *	0	1	1	Composite
@@ -1304,12 +1312,11 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int i;
 
-	if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+	if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
 	    tv_mode->component_only)
 		return;
 
@@ -1317,12 +1324,12 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
 	for (i = 0; i < sizeof(tv_modes) / sizeof(*tv_modes); i++) {
 		tv_mode = tv_modes + i;
 
-		if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
+		if ((intel_tv->type == DRM_MODE_CONNECTOR_Component) ==
 		    tv_mode->component_only)
 			break;
 	}
 
-	tv_priv->tv_format = tv_mode->name;
+	intel_tv->tv_format = tv_mode->name;
 	drm_connector_property_set_value(connector,
 		connector->dev->mode_config.tv_mode_property, i);
 }
@@ -1336,31 +1343,31 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
 static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector)
 {
-	struct drm_crtc *crtc;
 	struct drm_display_mode mode;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
-	int dpms_mode;
-	int type = tv_priv->type;
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	int type;
 
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (encoder->crtc && encoder->crtc->enabled) {
-		type = intel_tv_detect_type(encoder->crtc, intel_encoder);
+		type = intel_tv_detect_type(intel_tv);
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+		struct drm_crtc *crtc;
+		int dpms_mode;
+
+		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder, connector,
+			type = intel_tv_detect_type(intel_tv);
+			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
 			type = -1;
 	}
 
-	tv_priv->type = type;
+	intel_tv->type = type;
 
 	if (type < 0)
 		return connector_status_disconnected;
@@ -1391,8 +1398,8 @@ intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
 		mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1417,8 +1424,8 @@ intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int j, count = 0;
 	u64 tmp;
 
@@ -1483,8 +1490,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
+	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -1494,30 +1500,30 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 		goto out;
 
 	if (property == dev->mode_config.tv_left_margin_property &&
-	    tv_priv->margin[TV_MARGIN_LEFT] != val) {
-		tv_priv->margin[TV_MARGIN_LEFT] = val;
+	    intel_tv->margin[TV_MARGIN_LEFT] != val) {
+		intel_tv->margin[TV_MARGIN_LEFT] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_right_margin_property &&
-		   tv_priv->margin[TV_MARGIN_RIGHT] != val) {
-		tv_priv->margin[TV_MARGIN_RIGHT] = val;
+		   intel_tv->margin[TV_MARGIN_RIGHT] != val) {
+		intel_tv->margin[TV_MARGIN_RIGHT] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_top_margin_property &&
-		   tv_priv->margin[TV_MARGIN_TOP] != val) {
-		tv_priv->margin[TV_MARGIN_TOP] = val;
+		   intel_tv->margin[TV_MARGIN_TOP] != val) {
+		intel_tv->margin[TV_MARGIN_TOP] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_bottom_margin_property &&
-		   tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
-		tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+		   intel_tv->margin[TV_MARGIN_BOTTOM] != val) {
+		intel_tv->margin[TV_MARGIN_BOTTOM] = val;
 		changed = true;
 	} else if (property == dev->mode_config.tv_mode_property) {
 		if (val >= ARRAY_SIZE(tv_modes)) {
 			ret = -EINVAL;
 			goto out;
 		}
-		if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
+		if (!strcmp(intel_tv->tv_format, tv_modes[val].name))
 			goto out;
 
-		tv_priv->tv_format = tv_modes[val].name;
+		intel_tv->tv_format = tv_modes[val].name;
 		changed = true;
 	} else {
 		ret = -EINVAL;
@@ -1553,16 +1559,8 @@ static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs =
 	.best_encoder = intel_attached_encoder,
 };
 
-static void intel_tv_enc_destroy(struct drm_encoder *encoder)
-{
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
-}
-
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
-	.destroy = intel_tv_enc_destroy,
+	.destroy = intel_encoder_destroy,
 };
 
 /*
@@ -1606,9 +1604,9 @@ intel_tv_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_connector *connector;
+	struct intel_tv *intel_tv;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
-	struct intel_tv_priv *tv_priv;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
 	char **tv_format_names;
 	int i, initial_mode = 0;
@@ -1647,18 +1645,18 @@ intel_tv_init(struct drm_device *dev)
 	    (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
 		return;
 
-	intel_encoder = kzalloc(sizeof(struct intel_encoder) +
-				sizeof(struct intel_tv_priv), GFP_KERNEL);
-	if (!intel_encoder) {
+	intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+	if (!intel_tv) {
 		return;
 	}
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
-		kfree(intel_encoder);
+		kfree(intel_tv);
 		return;
 	}
 
+	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1668,22 +1666,20 @@ intel_tv_init(struct drm_device *dev)
 			 DRM_MODE_ENCODER_TVDAC);
 
 	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
-	tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
 	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
 	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
-	intel_encoder->dev_priv = tv_priv;
-	tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
+	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
 	/* BIOS margin values */
-	tv_priv->margin[TV_MARGIN_LEFT] = 54;
-	tv_priv->margin[TV_MARGIN_TOP] = 36;
-	tv_priv->margin[TV_MARGIN_RIGHT] = 46;
-	tv_priv->margin[TV_MARGIN_BOTTOM] = 37;
+	intel_tv->margin[TV_MARGIN_LEFT] = 54;
+	intel_tv->margin[TV_MARGIN_TOP] = 36;
+	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
+	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-	tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
 
 	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
@@ -1703,16 +1699,16 @@ intel_tv_init(struct drm_device *dev)
 				      initial_mode);
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_left_margin_property,
-				      tv_priv->margin[TV_MARGIN_LEFT]);
+				      intel_tv->margin[TV_MARGIN_LEFT]);
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_top_margin_property,
-				      tv_priv->margin[TV_MARGIN_TOP]);
+				      intel_tv->margin[TV_MARGIN_TOP]);
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_right_margin_property,
-				      tv_priv->margin[TV_MARGIN_RIGHT]);
+				      intel_tv->margin[TV_MARGIN_RIGHT]);
 	drm_connector_attach_property(connector,
 				      dev->mode_config.tv_bottom_margin_property,
-				      tv_priv->margin[TV_MARGIN_BOTTOM]);
+				      intel_tv->margin[TV_MARGIN_BOTTOM]);
 out:
 	drm_sysfs_connector_add(connector);
 }
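The intel_tv conversion above replaces the dev_priv side-pointer with an embedded intel_encoder base and recovers the containing object via container_of(). A minimal user-space sketch of that upcast, with illustrative struct names standing in for intel_encoder/intel_tv:

#include <stddef.h>
#include <stdio.h>

struct encoder { int type; };

struct tv {
	struct encoder base;	/* embedded base object */
	int margin[4];
};

/* Same shape as the kernel macro: subtract the member offset to get
 * back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct tv *to_tv(struct encoder *enc)
{
	return container_of(enc, struct tv, base);
}

int main(void)
{
	struct tv tv = { .base = { .type = 1 }, .margin = { 54, 36, 46, 37 } };
	struct encoder *enc = &tv.base;		/* what callbacks receive */

	printf("left margin = %d\n", to_tv(enc)->margin[0]);
	return 0;
}

Because the derived object is one allocation, a single kfree() in a shared destroy callback (here, intel_encoder_destroy) frees everything, which is what let the per-encoder destroy functions be deleted.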
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index fff82045c427..9ce2827f8c00 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1085,19 +1085,19 @@ file_priv)
 }
 
 struct drm_ioctl_desc mga_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
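The _DRV variant above derives the ioctl number from the bare command name instead of asking the driver to spell both out, so a table row cannot pair a handler with the wrong number. A hedged sketch of the flavour of that macro (all names here are hypothetical, not the DRM definitions):

#include <stdio.h>

enum { NR_DEMO_INIT, NR_DEMO_FLUSH };

struct ioctl_desc { int nr; const char *name; };

/* Pasting NR_DEMO_ onto the command name keeps number and name in sync. */
#define DEF_DRV(name)	{ NR_DEMO_##name, #name }

static const struct ioctl_desc ioctls[] = {
	DEF_DRV(INIT),
	DEF_DRV(FLUSH),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
		printf("%d -> %s\n", ioctls[i].nr, ioctls[i].name);
	return 0;
}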
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0b69a9628c95..974b0f8ae048 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2166,7 +2166,7 @@ peek_fb(struct drm_device *dev, struct io_mapping *fb,
 	uint32_t val = 0;
 
 	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint32_t __iomem *p =
+		uint8_t __iomem *p =
 			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
 		val = ioread32(p + (off & ~PAGE_MASK));
@@ -2182,7 +2182,7 @@ poke_fb(struct drm_device *dev, struct io_mapping *fb,
 	uint32_t off, uint32_t val)
 {
 	if (off < pci_resource_len(dev->pdev, 1)) {
-		uint32_t __iomem *p =
+		uint8_t __iomem *p =
 			io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
 
 		iowrite32(val, p + (off & ~PAGE_MASK));
@@ -3869,27 +3869,10 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
 	}
 #ifdef __powerpc__
 	/* Powerbook specific quirks */
-	if ((dev->pci_device & 0xffff) == 0x0179 ||
-	    (dev->pci_device & 0xffff) == 0x0189 ||
-	    (dev->pci_device & 0xffff) == 0x0329) {
-		if (script == LVDS_RESET) {
-			nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
-
-		} else if (script == LVDS_PANEL_ON) {
-			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-				  | (1 << 31));
-			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
-
-		} else if (script == LVDS_PANEL_OFF) {
-			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
-				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
-				  & ~(1 << 31));
-			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
-				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
-		}
-	}
+	if (script == LVDS_RESET &&
+	    (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+	     dev->pci_device == 0x0329))
+		nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
 	return 0;
@@ -4381,11 +4364,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 	 *
 	 * For the moment, a quirk will do :)
 	 */
-	if ((dev->pdev->device == 0x01d7) &&
-	    (dev->pdev->subsystem_vendor == 0x1028) &&
-	    (dev->pdev->subsystem_device == 0x01c2)) {
+	if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
 		bios->fp.duallink_transition_clk = 80000;
-	}
 
 	/* set dual_link flag for EDID case */
 	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
@@ -4587,7 +4567,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -1) {
@@ -4597,7 +4577,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk == -2) {
@@ -4610,7 +4590,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk > 0) {
@@ -4622,7 +4602,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	} else
 	if (pxclk < 0) {
@@ -4634,7 +4614,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 			return 1;
 		}
 
-		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
+		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent);
 	}
 
@@ -5357,19 +5337,17 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	}
 
 	tmdstableptr = ROM16(bios->data[bitentry->offset]);
-
-	if (tmdstableptr == 0x0) {
+	if (!tmdstableptr) {
 		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
 		return -EINVAL;
 	}
 
+	NV_INFO(dev, "TMDS table version %d.%d\n",
+		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+
 	/* nv50+ has v2.0, but we don't parse it atm */
-	if (bios->data[tmdstableptr] != 0x11) {
-		NV_WARN(dev,
-			"TMDS table revision %d.%d not currently supported\n",
-			bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
+	if (bios->data[tmdstableptr] != 0x11)
 		return -ENOSYS;
-	}
 
 	/*
 	 * These two scripts are odd: they don't seem to get run even when
@@ -5809,6 +5787,20 @@ parse_dcb_gpio_table(struct nvbios *bios)
 			gpio->line = tvdac_gpio[1] >> 4;
 			gpio->invert = tvdac_gpio[0] & 2;
 		}
+	} else {
+		/*
+		 * No systematic way to store GPIO info on pre-v2.2
+		 * DCBs, try to match the PCI device IDs.
+		 */
+
+		/* Apple iMac G4 NV18 */
+		if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+
+			gpio->tag = DCB_GPIO_TVDAC0;
+			gpio->line = 4;
+		}
+
 	}
 
 	if (!gpio_table_ptr)
@@ -5884,9 +5876,7 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
 	struct drm_device *dev = bios->dev;
 
 	/* Gigabyte NX85T */
-	if ((dev->pdev->device == 0x0421) &&
-	    (dev->pdev->subsystem_vendor == 0x1458) &&
-	    (dev->pdev->subsystem_device == 0x344c)) {
+	if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
 		if (cte->type == DCB_CONNECTOR_HDMI_1)
 			cte->type = DCB_CONNECTOR_DVI_I;
 	}
@@ -6139,7 +6129,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 		entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
 
 		break;
-	case 0xe:
+	case OUTPUT_EOL:
 		/* weird g80 mobile type that "nv" treats as a terminator */
 		dcb->entries--;
 		return false;
@@ -6176,22 +6166,14 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 		entry->type = OUTPUT_TV;
 		break;
 	case 2:
-	case 3:
-		entry->type = OUTPUT_LVDS;
-		break;
 	case 4:
-		switch ((conn & 0x000000f0) >> 4) {
-		case 0:
-			entry->type = OUTPUT_TMDS;
-			break;
-		case 1:
-			entry->type = OUTPUT_LVDS;
-			break;
-		default:
-			NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
-				 (conn & 0x000000f0) >> 4);
-			return false;
-		}
+		if (conn & 0x10)
+			entry->type = OUTPUT_LVDS;
+		else
+			entry->type = OUTPUT_TMDS;
+		break;
+	case 3:
+		entry->type = OUTPUT_LVDS;
 		break;
 	default:
 		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
@@ -6307,9 +6289,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	 * nasty problems until this is sorted (assuming it's not a
 	 * VBIOS bug).
 	 */
-	if ((dev->pdev->device == 0x040d) &&
-	    (dev->pdev->subsystem_vendor == 0x1028) &&
-	    (dev->pdev->subsystem_device == 0x019b)) {
+	if (nv_match_device(dev, 0x040d, 0x1028, 0x019b)) {
 		if (*conn == 0x02026312 && *conf == 0x00000020)
 			return false;
 	}
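The peek_fb/poke_fb hunks above change the mapped pointer from uint32_t to uint8_t because the offset added to it is a byte offset: C pointer arithmetic scales by the pointee size, so a uint32_t pointer would land four times too far into the aperture. A small user-space demonstration of exactly that scaling:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t buf[64] = { 0 };
	uint32_t off = 8;		/* byte offset into the "aperture" */

	uint32_t *p32 = (uint32_t *)buf;
	uint8_t  *p8  = buf;

	/* p32 + off advances off * 4 bytes; p8 + off advances off bytes.
	 * With a byte offset, only the byte-pointer arithmetic lands
	 * where intended. */
	printf("p32 + off -> byte %td\n", (uint8_t *)(p32 + off) - buf);
	printf("p8  + off -> byte %td\n", (p8 + off) - buf);
	return 0;
}

This prints 32 and 8: the same nominal offset, two different addresses, which is the bug the type change fixes.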
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd14dfd3d780..c1de2f3fcb0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -95,6 +95,7 @@ enum dcb_type {
 	OUTPUT_TMDS = 2,
 	OUTPUT_LVDS = 3,
 	OUTPUT_DP = 6,
+	OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
 	OUTPUT_ANY = -1
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 84f85183d041..f6f44779d82f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,21 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 
+int
+nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
+{
+	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
+	int ret;
+
+	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
+		return 0;
+
+	spin_lock(&nvbo->bo.lock);
+	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+	spin_unlock(&nvbo->bo.lock);
+	return ret;
+}
+
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
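nouveau_bo_sync_gpu() above waits on a buffer's last fence only when that fence was emitted on a different channel; same-channel work is already ordered by the FIFO. A minimal sketch of that decision with hypothetical stand-in types (the busy loop stands in for ttm_bo_wait()):

struct channel { int id; };
struct fence { struct channel *chan; int signalled; };
struct buffer { struct fence *last_fence; };

static int wait_fence(struct fence *f)
{
	while (!f->signalled)
		;		/* stand-in for ttm_bo_wait() */
	return 0;
}

static int buffer_sync(struct buffer *bo, struct channel *chan)
{
	struct fence *prev = bo->last_fence;

	/* Nothing pending, or same channel: submission order suffices. */
	if (!prev || prev->chan == chan)
		return 0;

	/* Cross-channel dependency: stall until the old work completes. */
	return wait_fence(prev);
}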
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 90fdcda332be..0480f064f2c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -426,18 +426,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index b1b22baf1428..a1473fff06ac 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -104,7 +104,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 	int i;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
-		struct nouveau_i2c_chan *i2c;
+		struct nouveau_i2c_chan *i2c = NULL;
 		struct nouveau_encoder *nv_encoder;
 		struct drm_mode_object *obj;
 		int id;
@@ -117,7 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 		if (!obj)
 			continue;
 		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
-		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+
+		if (nv_encoder->dcb->i2c_index < 0xf)
+			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
 
 		if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
 			*pnv_encoder = nv_encoder;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e424bf74d706..b1be617373b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1165,6 +1165,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1388,6 +1389,15 @@ nv_two_reg_pll(struct drm_device *dev)
 	return false;
 }
 
+static inline bool
+nv_match_device(struct drm_device *dev, unsigned device,
+		unsigned sub_vendor, unsigned sub_device)
+{
+	return dev->pdev->device == device &&
+		dev->pdev->subsystem_vendor == sub_vendor &&
+		dev->pdev->subsystem_device == sub_device;
+}
+
 #define NV_SW 0x0000506e
 #define NV_SW_DMA_SEMAPHORE 0x00000060
 #define NV_SW_SEMAPHORE_OFFSET 0x00000064
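nv_match_device() collapses the repeated three-field PCI ID comparison that the quirk sites in nouveau_bios.c and nv17_tv.c previously open-coded. A user-space sketch of the same factoring, with struct pci_id standing in for the pci_dev fields the kernel helper reads:

#include <stdbool.h>
#include <stdio.h>

struct pci_id { unsigned device, sub_vendor, sub_device; };

static bool match_device(const struct pci_id *id, unsigned device,
			 unsigned sub_vendor, unsigned sub_device)
{
	return id->device == device &&
	       id->sub_vendor == sub_vendor &&
	       id->sub_device == sub_device;
}

int main(void)
{
	struct pci_id board = { 0x01d7, 0x1028, 0x01c2 };

	/* One readable line per quirk instead of a three-line condition. */
	if (match_device(&board, 0x01d7, 0x1028, 0x01c2))
		puts("apply dual-link transition-clock quirk");
	return 0;
}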
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6b208ffafa8d..87ac21ec23d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -64,16 +64,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
 	struct nouveau_fence *fence;
 	uint32_t sequence;
 
+	spin_lock(&chan->fence.lock);
+
 	if (USE_REFCNT)
 		sequence = nvchan_rd32(chan, 0x48);
 	else
 		sequence = atomic_read(&chan->fence.last_sequence_irq);
 
 	if (chan->fence.sequence_ack == sequence)
-		return;
+		goto out;
 	chan->fence.sequence_ack = sequence;
 
-	spin_lock(&chan->fence.lock);
 	list_for_each_safe(entry, tmp, &chan->fence.pending) {
 		fence = list_entry(entry, struct nouveau_fence, entry);
 
@@ -85,6 +86,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		if (sequence == chan->fence.sequence_ack)
 			break;
 	}
+out:
 	spin_unlock(&chan->fence.lock);
 }
 
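The fence hunks widen the lock so the sequence read, the ack comparison, and the list walk all happen under it, and turn the early return into a goto to the single unlock site. A minimal sketch of that shape using pthreads in place of the kernel spinlock; the types are illustrative:

#include <pthread.h>

struct chan {
	pthread_mutex_t lock;
	unsigned seq, seq_ack;
};

static unsigned read_hw_sequence(struct chan *c) { return c->seq; }

static void fence_update(struct chan *c)
{
	unsigned sequence;

	pthread_mutex_lock(&c->lock);

	sequence = read_hw_sequence(c);
	if (c->seq_ack == sequence)
		goto out;	/* a bare return here would leak the lock */
	c->seq_ack = sequence;

	/* ... walk and retire the pending list here ... */
out:
	pthread_mutex_unlock(&c->lock);
}

With one exit path, every new early-out added later inherits correct unlocking for free.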
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac1b696..ead7b8fc53fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -245,7 +245,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve(&nvbo->bo);
-		drm_gem_object_unreference(nvbo->gem);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
 	}
 }
 
@@ -300,7 +300,7 @@ retry:
 			validate_fini(op, NULL);
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-			drm_gem_object_unreference(gem);
+			drm_gem_object_unreference_unlocked(gem);
 			if (ret) {
 				NV_ERROR(dev, "fail reserve\n");
 				return ret;
@@ -337,7 +337,9 @@ retry:
 			return -EINVAL;
 		}
 
+		mutex_unlock(&drm_global_mutex);
 		ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+		mutex_lock(&drm_global_mutex);
 		if (ret) {
 			NV_ERROR(dev, "fail wait_cpu\n");
 			return ret;
@@ -361,16 +363,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-			spin_lock(&nvbo->bo.lock);
-			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret)) {
-				NV_ERROR(dev, "fail wait other chan\n");
-				return ret;
-			}
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail pre-validate sync\n");
+			return ret;
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +378,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
-		nvbo->channel = chan;
+		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false, false);
 		nvbo->channel = NULL;
@@ -390,6 +387,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 		}
 
+		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail post-validate sync\n");
+			return ret;
+		}
+
 		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +616,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		return PTR_ERR(bo);
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	/* Mark push buffers as being used on PFIFO, the validation code
+	 * will then make sure that if the pushbuf bo moves, that they
+	 * happen on the kernel channel, which will in turn cause a sync
+	 * to happen before we try and submit the push buffer.
+	 */
+	for (i = 0; i < req->nr_push; i++) {
+		if (push[i].bo_index >= req->nr_buffers) {
+			NV_ERROR(dev, "push %d buffer not in list\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		bo[push[i].bo_index].read_domains |= (1 << 31);
+	}
 
 	/* Validate buffer list */
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +663,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 					push[i].length);
 		}
 	} else
-	if (dev_priv->card_type >= NV_20) {
+	if (dev_priv->chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +729,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
 	validate_fini(&op, fence);
 	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
 	kfree(push);
 
@@ -722,7 +737,7 @@ out_next:
 		req->suffix0 = 0x00000000;
 		req->suffix1 = 0x00000000;
 	} else
-	if (dev_priv->card_type >= NV_20) {
+	if (dev_priv->chipset >= 0x25) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
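The ttm_bo_wait_cpu hunk above drops drm_global_mutex around the blocking wait so other tasks can make progress while this one sleeps, then reacquires it before continuing. A sketch of that unlock/wait/relock shape with pthreads standing in for the DRM global lock (names are hypothetical):

#include <pthread.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static int blocking_wait(void) { return 0; }	/* stand-in for ttm_bo_wait_cpu() */

/* Caller holds global_lock on entry and on return. */
static int wait_without_global_lock(void)
{
	int ret;

	pthread_mutex_unlock(&global_lock);
	ret = blocking_wait();		/* others can take the lock meanwhile */
	pthread_mutex_lock(&global_lock);

	return ret;
}

The caveat the pattern carries: any state protected by the lock may have changed across the wait, so whatever was computed before the unlock must be revalidated after relocking.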
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 0bd407ca3d42..84614858728b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
 	if (entry->chan)
 		return -EEXIST;
 
-	if (dev_priv->card_type == NV_C0 && entry->read >= NV50_I2C_PORTS) {
+	if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
 		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 491767fe4fcf..6b9187d7f67d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -214,6 +214,7 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -239,10 +240,19 @@ nouveau_sgdma_init(struct drm_device *dev)
 
 	dev_priv->gart_info.sg_dummy_page =
 		alloc_page(GFP_KERNEL|__GFP_DMA32);
+	if (!dev_priv->gart_info.sg_dummy_page) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -ENOMEM;
+	}
+
 	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
 	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
 			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
+		nouveau_gpuobj_del(dev, &gpuobj);
+		return -EFAULT;
+	}
 
 	if (dev_priv->card_type < NV_50) {
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
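The sgdma hunk adds the two checks that were missing: the page allocation and the DMA mapping can each fail, and each failure must unwind what was set up before it. A user-space sketch of that shape; alloc_obj/map_page are hypothetical stand-ins for nouveau_gpuobj_new()/pci_map_page(), not real kernel calls:

#include <stdlib.h>

static void *alloc_obj(void) { return malloc(16); }
static void free_obj(void *o) { free(o); }
static void *alloc_dummy_page(void) { return malloc(4096); }
static long map_page(void *p) { return p ? (long)p : 0; }

static int sgdma_init(void)
{
	void *obj, *page;
	long bus;

	obj = alloc_obj();
	if (!obj)
		return -1;

	page = alloc_dummy_page();
	if (!page) {			/* previously unchecked */
		free_obj(obj);
		return -1;
	}

	bus = map_page(page);
	if (!bus) {			/* mappings can fail too */
		free(page);
		free_obj(obj);
		return -1;
	}
	return 0;
}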
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index a5dcf7685800..0d3206a7046c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -444,6 +444,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct dcb_entry *dcbe = nv_encoder->dcb;
 	int head = nouveau_crtc(encoder->crtc)->index;
+	struct drm_encoder *slave_encoder;
 
 	if (dcbe->type == OUTPUT_TMDS)
 		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
@@ -462,9 +463,10 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
 
 	/* Init external transmitters */
-	if (get_tmds_slave(encoder))
-		get_slave_funcs(get_tmds_slave(encoder))->mode_set(
-			encoder, &nv_encoder->mode, &nv_encoder->mode);
+	slave_encoder = get_tmds_slave(encoder);
+	if (slave_encoder)
+		get_slave_funcs(slave_encoder)->mode_set(
+			slave_encoder, &nv_encoder->mode, &nv_encoder->mode);
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
@@ -473,6 +475,27 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
 		   nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
 }
 
+static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+{
+#ifdef __powerpc__
+	struct drm_device *dev = encoder->dev;
+
+	/* BIOS scripts usually take care of the backlight, thanks
+	 * Apple for your consistency.
+	 */
+	if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
+	    dev->pci_device == 0x0329) {
+		if (mode == DRM_MODE_DPMS_ON) {
+			nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
+			nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1);
+		} else {
+			nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
+			nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0);
+		}
+	}
+#endif
+}
+
 static inline bool is_powersaving_dpms(int mode)
 {
 	return (mode != DRM_MODE_DPMS_ON);
@@ -520,6 +543,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
520 LVDS_PANEL_OFF, 0); 543 LVDS_PANEL_OFF, 0);
521 } 544 }
522 545
546 nv04_dfp_update_backlight(encoder, mode);
523 nv04_dfp_update_fp_control(encoder, mode); 547 nv04_dfp_update_fp_control(encoder, mode);
524 548
525 if (mode == DRM_MODE_DPMS_ON) 549 if (mode == DRM_MODE_DPMS_ON)
@@ -543,6 +567,7 @@ static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
543 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
544 mode, nv_encoder->dcb->index); 568 mode, nv_encoder->dcb->index);
545 569
570 nv04_dfp_update_backlight(encoder, mode);
546 nv04_dfp_update_fp_control(encoder, mode); 571 nv04_dfp_update_fp_control(encoder, mode);
547} 572}
548 573
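
Note: the PowerBook backlight quirk above is built on nv_mask(), nouveau's
read-modify-write register helper. A sketch of the semantics the call sites assume
(nv_rd32/nv_wr32 are the driver's MMIO accessors; returning the old value is my
assumption, and the exact prototype may differ):

	/* Read-modify-write: clear the bits in 'mask', then OR in 'val'. */
	static u32 nv_mask_sketch(struct drm_device *dev, u32 reg, u32 mask, u32 val)
	{
		u32 tmp = nv_rd32(dev, reg);		/* current register value */
		nv_wr32(dev, reg, (tmp & ~mask) | val);	/* update the masked bits */
		return tmp;				/* previous value */
	}

So nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1) clears the low two bits and then sets bit 0.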
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 44fefb0c7083..13cdc05b7c2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -121,10 +121,14 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
 	/* Zotac FX5200 */
-	if (dev->pdev->device == 0x0322 &&
-	    dev->pdev->subsystem_vendor == 0x19da &&
-	    (dev->pdev->subsystem_device == 0x1035 ||
-	     dev->pdev->subsystem_device == 0x2035)) {
+	if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
+	    nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+		*pin_mask = 0xc;
+		return false;
+	}
+
+	/* MSI nForce2 IGP */
+	if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
 		*pin_mask = 0xc;
 		return false;
 	}
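
Note: the quirk table now goes through nv_match_device() instead of open-coding three
PCI ID comparisons per board. Its likely shape, inferred from the call sites (a sketch,
not necessarily the driver's exact definition):

	/* True when the PCI device and subsystem IDs all match this board. */
	static bool nv_match_device(struct drm_device *dev, unsigned device,
				    unsigned sub_vendor, unsigned sub_device)
	{
		return dev->pdev->device == device &&
		       dev->pdev->subsystem_vendor == sub_vendor &&
		       dev->pdev->subsystem_device == sub_device;
	}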
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 37c7b48ab24a..91ef93cf1f35 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -139,6 +139,8 @@ nv50_instmem_init(struct drm_device *dev)
 	chan->file_priv = (struct drm_file *)-2;
 	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
 
+	INIT_LIST_HEAD(&chan->ramht_refs);
+
 	/* Channel's PRAMIN object + heap */
 	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
 				      NULL, &chan->ramin);
@@ -278,7 +280,7 @@ nv50_instmem_init(struct drm_device *dev)
 	/*XXX: incorrect, but needed to make hash func "work" */
 	dev_priv->ramht_offset = 0x10000;
 	dev_priv->ramht_bits = 9;
-	dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+	dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 3ab3cdc42173..6b451f864783 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -142,14 +142,16 @@ int
 nvc0_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 *buf;
 	int i;
 
 	dev_priv->susres.ramin_copy = vmalloc(65536);
 	if (!dev_priv->susres.ramin_copy)
 		return -ENOMEM;
+	buf = dev_priv->susres.ramin_copy;
 
-	for (i = 0x700000; i < 0x710000; i += 4)
-		dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+	for (i = 0; i < 65536; i += 4)
+		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
 	return 0;
 }
 
@@ -157,14 +159,15 @@ void
 nvc0_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 *buf = dev_priv->susres.ramin_copy;
 	u64 chan;
 	int i;
 
 	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
 	nv_wr32(dev, 0x001700, chan >> 16);
 
-	for (i = 0x700000; i < 0x710000; i += 4)
-		nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+	for (i = 0; i < 65536; i += 4)
+		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
 	vfree(dev_priv->susres.ramin_copy);
 	dev_priv->susres.ramin_copy = NULL;
 
@@ -221,7 +224,7 @@ nvc0_instmem_init(struct drm_device *dev)
 	/*XXX: incorrect, but needed to make hash func "work" */
 	dev_priv->ramht_offset = 0x10000;
 	dev_priv->ramht_bits = 9;
-	dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+	dev_priv->ramht_size = (1 << dev_priv->ramht_bits) * 8;
 	return 0;
 }
 
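
Note: the ramht_size fix appears in both the nv50 and nvc0 hunks above. ramht_bits
counts hash-table entries, and each RAMHT entry is two 32-bit words (8 bytes, assuming
the usual layout), so the table's byte size is

	(1 << 9) entries * 8 bytes/entry = 4096 bytes

rather than the 512 the old expression produced. The suspend/resume loops are also
rebased from absolute offsets (0x700000..0x710000) to NV04_PRAMIN-relative ones, which
copies the same 64 KiB window while making the base register explicit.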
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 077af1f2f9b4..a9e33ce65918 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1639,30 +1639,29 @@ void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
 		r128_do_cleanup_pageflip(dev);
 	}
 }
-
 void r128_driver_lastclose(struct drm_device *dev)
 {
 	r128_do_cleanup_cce(dev);
 }
 
 struct drm_ioctl_desc r128_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
 };
 
 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
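
Note: the table is converted from the generic DRM_IOCTL_DEF to the driver-local
DRM_IOCTL_DEF_DRV, which takes the ioctl name without the DRM_ prefix. The point of
the macro is that one token expands to both the driver ioctl number (DRM_R128_INIT)
and the full ioctl encoding (DRM_IOCTL_R128_INIT), so the core can cross-check what
userspace passed against the driver's declared encoding. A sketch of the idea (not
the exact drmP.h definition):

	#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)	\
		[DRM_IOCTL_NR(DRM_##ioctl)] = {			\
			.cmd = DRM_##ioctl,			\
			.cmd_drv = DRM_IOCTL_##ioctl,		\
			.func = _func, .flags = _flags }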
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 12ad512bd3d3..464a81a1990f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -332,6 +332,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
 	args.usV_SyncWidth =
 		cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
 
+	args.ucOverscanRight = radeon_crtc->h_border;
+	args.ucOverscanLeft = radeon_crtc->h_border;
+	args.ucOverscanBottom = radeon_crtc->v_border;
+	args.ucOverscanTop = radeon_crtc->v_border;
+
 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
 		misc |= ATOM_VSYNC_POLARITY;
 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -471,6 +476,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 	struct radeon_encoder *radeon_encoder = NULL;
 	u32 adjusted_clock = mode->clock;
 	int encoder_mode = 0;
+	u32 dp_clock = mode->clock;
+	int bpc = 8;
 
 	/* reset the pll flags */
 	pll->flags = 0;
@@ -513,6 +520,17 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (encoder->crtc == crtc) {
 			radeon_encoder = to_radeon_encoder(encoder);
 			encoder_mode = atombios_get_encoder_mode(encoder);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+				if (connector) {
+					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+					struct radeon_connector_atom_dig *dig_connector =
+						radeon_connector->con_priv;
+
+					dp_clock = dig_connector->dp_clock;
+				}
+			}
+
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
@@ -521,6 +539,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 					pll->algo = PLL_ALGO_LEGACY;
 					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
 				}
+				/* There is some evidence (often anecdotal) that RV515 LVDS
+				 * (on some boards at least) prefers the legacy algo. I'm not
+				 * sure whether this should handled generically or on a
+				 * case-by-case quirk basis. Both algos should work fine in the
+				 * majority of cases.
+				 */
+				if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) &&
+				    (rdev->family == CHIP_RV515)) {
+					/* allow the user to overrride just in case */
+					if (radeon_new_pll == 1)
+						pll->algo = PLL_ALGO_NEW;
+					else
+						pll->algo = PLL_ALGO_LEGACY;
+				}
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -555,6 +587,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 		args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 		args.v1.ucEncodeMode = encoder_mode;
+		if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+			/* may want to enable SS on DP eventually */
+			/* args.v1.ucConfig |=
+			   ADJUST_DISPLAY_CONFIG_SS_ENABLE;*/
+		} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+			args.v1.ucConfig |=
+				ADJUST_DISPLAY_CONFIG_SS_ENABLE;
+		}
 
 		atom_execute_table(rdev->mode_info.atom_context,
 				   index, (uint32_t *)&args);
@@ -568,10 +608,20 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
-			if (encoder_mode == ATOM_ENCODER_MODE_DP)
+			if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+				/* may want to enable SS on DP/eDP eventually */
+				/*args.v3.sInput.ucDispPllConfig |=
+				  DISPPLL_CONFIG_SS_ENABLE;*/
 				args.v3.sInput.ucDispPllConfig |=
 					DISPPLL_CONFIG_COHERENT_MODE;
-			else {
+				/* 16200 or 27000 */
+				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+			} else {
+				if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+					/* deep color support */
+					args.v3.sInput.usPixelClock =
+						cpu_to_le16((mode->clock * bpc / 8) / 10);
+				}
 				if (dig->coherent_mode)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_COHERENT_MODE;
@@ -580,13 +630,19 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 						DISPPLL_CONFIG_DUAL_LINK;
 			}
 		} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-			/* may want to enable SS on DP/eDP eventually */
-			/*args.v3.sInput.ucDispPllConfig |=
-			  DISPPLL_CONFIG_SS_ENABLE;*/
-			if (encoder_mode == ATOM_ENCODER_MODE_DP)
+			if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+				/* may want to enable SS on DP/eDP eventually */
+				/*args.v3.sInput.ucDispPllConfig |=
+				  DISPPLL_CONFIG_SS_ENABLE;*/
 				args.v3.sInput.ucDispPllConfig |=
 					DISPPLL_CONFIG_COHERENT_MODE;
-			else {
+				/* 16200 or 27000 */
+				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+			} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+				/* want to enable SS on LVDS eventually */
+				/*args.v3.sInput.ucDispPllConfig |=
+				  DISPPLL_CONFIG_SS_ENABLE;*/
+			} else {
 				if (mode->clock > 165000)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_DUAL_LINK;
@@ -1019,11 +1075,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (rdev->family >= CHIP_RV770) {
 		if (radeon_crtc->crtc_id) {
-			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+			WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
 		} else {
-			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0);
-			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0);
+			WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
+			WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
 		}
 	}
 	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1160,8 +1216,18 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	bool is_tvcv = false;
 
-	/* TODO color tiling */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		/* find tv std */
+		if (encoder->crtc == crtc) {
+			struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+			if (radeon_encoder->active_device &
+			    (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				is_tvcv = true;
+		}
+	}
 
 	atombios_disable_ss(crtc);
 	/* always set DCPLL */
@@ -1170,9 +1236,14 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 	atombios_crtc_set_pll(crtc, adjusted_mode);
 	atombios_enable_ss(crtc);
 
-	if (ASIC_IS_AVIVO(rdev))
+	if (ASIC_IS_DCE4(rdev))
 		atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
-	else {
+	else if (ASIC_IS_AVIVO(rdev)) {
+		if (is_tvcv)
+			atombios_crtc_set_timing(crtc, adjusted_mode);
+		else
+			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+	} else {
 		atombios_crtc_set_timing(crtc, adjusted_mode);
 		if (radeon_crtc->crtc_id == 0)
 			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
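
Note: two details in the atombios_adjust_pll() hunks are easy to miss. For DP the PLL
is programmed with the fixed link rate rather than the mode's pixel clock: dp_clock
comes from the connector (162000 or 270000 kHz), and dividing by 10 gives the
16200-or-27000 units the comment refers to. For HDMI deep color the pixel clock scales
with the color depth:

	programmed clock = mode->clock * bpc / 8

so a 74,250 kHz mode at bpc = 12 would be programmed as 74250 * 12 / 8 = 111,375 kHz.
Since bpc is hard-coded to 8 in this patch, the scaling is a no-op for now.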
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 36e0d4b545e6..4e7778d44b8d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -610,7 +610,7 @@ void dp_link_train(struct drm_encoder *encoder,
 		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
 	else
 		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
-	if (dig_connector->linkb)
+	if (dig->linkb)
 		enc_id |= ATOM_DP_CONFIG_LINK_B;
 	else
 		enc_id |= ATOM_DP_CONFIG_LINK_A;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 957d5067ad9c..b8b7f010b25f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -675,6 +675,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 	return 0;
 }
 
+static int evergreen_cp_start(struct radeon_device *rdev)
+{
+	int r;
+	uint32_t cp_me;
+
+	r = radeon_ring_lock(rdev, 7);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(rdev, 0x1);
+	radeon_ring_write(rdev, 0x0);
+	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(rdev, 0);
+	radeon_ring_write(rdev, 0);
+	radeon_ring_unlock_commit(rdev);
+
+	cp_me = 0xff;
+	WREG32(CP_ME_CNTL, cp_me);
+
+	r = radeon_ring_lock(rdev, 4);
+	if (r) {
+		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+		return r;
+	}
+	/* init some VGT regs */
+	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(rdev, 0xe);
+	radeon_ring_write(rdev, 0x10);
+	radeon_ring_unlock_commit(rdev);
+
+	return 0;
+}
+
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
 	u32 tmp;
@@ -719,7 +756,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
 	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
-	r600_cp_start(rdev);
+	evergreen_cp_start(rdev);
 	rdev->cp.ready = true;
 	r = radeon_ring_test(rdev);
 	if (r) {
@@ -2054,11 +2091,6 @@ int evergreen_resume(struct radeon_device *rdev)
 	 */
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 
 	r = evergreen_startup(rdev);
 	if (r) {
@@ -2164,9 +2196,6 @@ int evergreen_init(struct radeon_device *rdev)
 	radeon_surface_init(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -2236,7 +2265,6 @@ void evergreen_fini(struct radeon_device *rdev)
 	evergreen_pcie_gart_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_clocks_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
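
Note: the ring writes in evergreen_cp_start() use radeon's type-3 command packets. The
header dword encodes an opcode and a payload length that is one less than the number of
dwords that follow; PACKET3(PACKET3_ME_INITIALIZE, 5) is indeed followed by six payload
writes. A sketch of the encoding (field layout as in the r600-era headers, to the best
of my reading):

	/* Type-3 packet header: bits [31:30]=3, [29:16]=count-1, [15:8]=opcode */
	#define PACKET3_SKETCH(op, n) \
		((3u << 30) | (((n) & 0x3FFF) << 16) | (((op) & 0xFF) << 8))

Splitting evergreen off from r600_cp_start() also lets the r600.c hunk below drop the
CHIP_CEDAR special case from the shared r600 path.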
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d0ebae9dde25..afc18d87fdca 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2119,10 +2119,7 @@ int r600_cp_start(struct radeon_device *rdev)
 	}
 	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
 	radeon_ring_write(rdev, 0x1);
-	if (rdev->family >= CHIP_CEDAR) {
-		radeon_ring_write(rdev, 0x0);
-		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
-	} else if (rdev->family >= CHIP_RV770) {
+	if (rdev->family >= CHIP_RV770) {
 		radeon_ring_write(rdev, 0x0);
 		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
 	} else {
@@ -2489,11 +2486,6 @@ int r600_resume(struct radeon_device *rdev)
 	 */
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
-	/* Initialize clocks */
-	r = radeon_clocks_init(rdev);
-	if (r) {
-		return r;
-	}
 
 	r = r600_startup(rdev);
 	if (r) {
@@ -2586,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
 	radeon_surface_init(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	r = radeon_clocks_init(rdev);
-	if (r)
-		return r;
 	/* Fence driver */
 	r = radeon_fence_driver_init(rdev);
 	if (r)
@@ -2663,7 +2652,6 @@ void r600_fini(struct radeon_device *rdev)
 	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_clocks_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
@@ -3541,7 +3529,7 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
 	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
 	 */
 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
-		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
 		u32 tmp;
 
 		WREG32(HDP_DEBUG1, 0);
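
Note: the last r600.c hunk retargets the RV770-RV740 HDP flush workaround from the GART
table mapping in VRAM to the dedicated scratch page declared in radeon.h below.
Presumably the GART table is not mapped in VRAM in every configuration, so a page the
driver always owns is the safer thing to read through (my reading; the diff itself does
not say). The flush works by bouncing a read off any VRAM mapping, roughly:

	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;

	WREG32(HDP_DEBUG1, 0);
	(void)readl(ptr);	/* the VRAM read forces the HDP cache flush */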
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3dfcfa3ca425..a168d644bf9e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1013,6 +1013,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
 
+/* VRAM scratch page for HDP bug */
+struct r700_vram_scratch {
+	struct radeon_bo *robj;
+	volatile uint32_t *ptr;
+};
 
 /*
  * Core structure, functions and helpers.
@@ -1079,6 +1084,7 @@ struct radeon_device {
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	struct r600_blit r600_blit;
+	struct r700_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct workqueue_struct *wq;
@@ -1333,8 +1339,6 @@ extern bool radeon_card_posted(struct radeon_device *rdev);
 extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
 extern void radeon_update_display_priority(struct radeon_device *rdev);
 extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
-extern int radeon_clocks_init(struct radeon_device *rdev);
-extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
 extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index f40dfb77f9b1..bd2f33e5c91a 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -156,7 +156,13 @@ int radeon_agp_init(struct radeon_device *rdev)
 	}
 
 	mode.mode = info.mode;
-	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	/* chips with the agp to pcie bridge don't have the AGP_STATUS register
+	 * Just use the whatever mode the host sets up.
+	 */
+	if (rdev->family <= CHIP_RV350)
+		agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+	else
+		agp_status = mode.mode;
 	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
 
 	if (is_v3) {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 646f96f97c77..25e1dd197791 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -733,6 +733,7 @@ static struct radeon_asic evergreen_asic = {
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = NULL,
 	.set_surface_reg = r600_set_surface_reg,
@@ -857,21 +858,3 @@ int radeon_asic_init(struct radeon_device *rdev)
 	return 0;
 }
 
-/*
- * Wrapper around modesetting bits. Move to radeon_clocks.c?
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
-	int r;
-
-	r = radeon_static_clocks_init(rdev->ddev);
-	if (r) {
-		return r;
-	}
-	DRM_INFO("Clocks initialized !\n");
-	return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6d30868744ee..ebae14c4b768 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -32,11 +32,11 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
 			uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			uint32_t supported_device);
 
 /* from radeon_connector.c */
@@ -46,14 +46,14 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t supported_device,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
-			  bool linkb, uint32_t igp_lane_info,
+			  uint32_t igp_lane_info,
 			  uint16_t connector_object_id,
 			  struct radeon_hpd *hpd,
 			  struct radeon_router *router);
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 union atom_supported_devices {
@@ -85,6 +85,19 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
 	for (i = 0; i < num_indices; i++) {
 		gpio = &i2c_info->asGPIO_Info[i];
 
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
 		if (gpio->sucI2cId.ucAccess == id) {
 			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
 			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -147,6 +160,20 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 	for (i = 0; i < num_indices; i++) {
 		gpio = &i2c_info->asGPIO_Info[i];
 		i2c.valid = false;
+
+		/* some evergreen boards have bad data for this entry */
+		if (ASIC_IS_DCE4(rdev)) {
+			if ((i == 7) &&
+			    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+			    (gpio->sucI2cId.ucAccess == 0)) {
+				gpio->sucI2cId.ucAccess = 0x97;
+				gpio->ucDataMaskShift = 8;
+				gpio->ucDataEnShift = 8;
+				gpio->ucDataY_Shift = 8;
+				gpio->ucDataA_Shift = 8;
+			}
+		}
+
 		i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
 		i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
 		i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
@@ -226,6 +253,8 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
 	struct radeon_hpd hpd;
 	u32 reg;
 
+	memset(&hpd, 0, sizeof(struct radeon_hpd));
+
 	if (ASIC_IS_DCE4(rdev))
 		reg = EVERGREEN_DC_GPIO_HPD_A;
 	else
@@ -477,7 +506,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	int i, j, k, path_size, device_support;
 	int connector_type;
 	u16 igp_lane_info, conn_id, connector_object_id;
-	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
 	struct radeon_router router;
 	struct radeon_gpio_rec gpio;
@@ -510,7 +538,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		addr += path_size;
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
-		linkb = false;
+
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -601,13 +629,10 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 					OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-					if (grph_obj_num == 2)
-						linkb = true;
-					else
-						linkb = false;
+					u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
 
 					radeon_add_atom_encoder(dev,
-								grph_obj_id,
+								encoder_obj,
 								le16_to_cpu
 								(path->
 								 usDeviceTag));
@@ -744,7 +769,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						le16_to_cpu(path->
 							    usDeviceTag),
 						connector_type, &ddc_bus,
-						linkb, igp_lane_info,
+						igp_lane_info,
 						connector_object_id,
 						&hpd,
 						&router);
@@ -933,13 +958,13 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
 			radeon_add_atom_encoder(dev,
-						radeon_get_encoder_id(dev,
+						radeon_get_encoder_enum(dev,
 								      (1 << i),
 								      dac),
 						(1 << i));
 		else
 			radeon_add_legacy_encoder(dev,
-						  radeon_get_encoder_id(dev,
+						  radeon_get_encoder_enum(dev,
 									(1 << i),
 									dac),
 						  (1 << i));
@@ -996,7 +1021,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 					  bios_connectors[i].
 					  connector_type,
 					  &bios_connectors[i].ddc_bus,
-					  false, 0,
+					  0,
 					  connector_object_id,
 					  &bios_connectors[i].hpd,
 					  &router);
@@ -1183,7 +1208,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 			return true;
 		break;
 	case 2:
-		if (igp_info->info_2.ucMemoryType & 0x0f)
+		if (igp_info->info_2.ulBootUpSidePortClock)
 			return true;
 		break;
 	default:
@@ -1305,6 +1330,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
+	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
@@ -1368,6 +1394,12 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		}
 
 		encoder->native_mode = lvds->native_mode;
+
+		if (encoder_enum == 2)
+			lvds->linkb = true;
+		else
+			lvds->linkb = false;
+
 	}
 	return lvds;
 }
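
Note: the evergreen i2c quirk above is applied, verbatim, in two separate loops. If it
ever needs a third copy, a small helper would keep them in sync (a hypothetical
refactor for illustration only; the entry type name is assumed from the atombios
headers):

	/* Fix up the bogus data some evergreen BIOSes report for i2c
	 * gpio table entry 7. */
	static void evergreen_fixup_i2c_gpio(struct radeon_device *rdev,
					     ATOM_GPIO_I2C_ASSIGMENT *gpio, int i)
	{
		if (ASIC_IS_DCE4(rdev) && (i == 7) &&
		    (gpio->usClkMaskRegisterIndex == 0x1936) &&
		    (gpio->sucI2cId.ucAccess == 0)) {
			gpio->sucI2cId.ucAccess = 0x97;
			gpio->ucDataMaskShift = 8;
			gpio->ucDataEnShift = 8;
			gpio->ucDataY_Shift = 8;
			gpio->ucDataA_Shift = 8;
		}
	}

The memset() added to radeon_atom_get_hpd_info_from_gpio() matters too: hpd appears to
have been returned partially uninitialized on paths that never filled it in.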
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 14448a740ba6..5249af8931e6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -327,6 +327,14 @@ void radeon_get_clock_info(struct drm_device *dev)
 	mpll->max_feedback_div = 0xff;
 	mpll->best_vco = 0;
 
+	if (!rdev->clock.default_sclk)
+		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+	if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
+		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+
+	rdev->pm.current_sclk = rdev->clock.default_sclk;
+	rdev->pm.current_mclk = rdev->clock.default_mclk;
+
 }
 
 /* 10 khz */
@@ -897,53 +905,3 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 	}
 }
 
-static void radeon_apply_clock_quirks(struct radeon_device *rdev)
-{
-	uint32_t tmp;
-
-	/* XXX make sure engine is idle */
-
-	if (rdev->family < CHIP_RS600) {
-		tmp = RREG32_PLL(RADEON_SCLK_CNTL);
-		if (ASIC_IS_R300(rdev) || ASIC_IS_RV100(rdev))
-			tmp |= RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_VIP;
-		if ((rdev->family == CHIP_RV250)
-		    || (rdev->family == CHIP_RV280))
-			tmp |=
-			    RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_DISP2;
-		if ((rdev->family == CHIP_RV350)
-		    || (rdev->family == CHIP_RV380))
-			tmp |= R300_SCLK_FORCE_VAP;
-		if (rdev->family == CHIP_R420)
-			tmp |= R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX;
-		WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-	} else if (rdev->family < CHIP_R600) {
-		tmp = RREG32_PLL(AVIVO_CP_DYN_CNTL);
-		tmp |= AVIVO_CP_FORCEON;
-		WREG32_PLL(AVIVO_CP_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_E2_DYN_CNTL);
-		tmp |= AVIVO_E2_FORCEON;
-		WREG32_PLL(AVIVO_E2_DYN_CNTL, tmp);
-
-		tmp = RREG32_PLL(AVIVO_IDCT_DYN_CNTL);
-		tmp |= AVIVO_IDCT_FORCEON;
-		WREG32_PLL(AVIVO_IDCT_DYN_CNTL, tmp);
-	}
-}
-
-int radeon_static_clocks_init(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	/* XXX make sure engine is idle */
-
-	if (radeon_dynclks != -1) {
-		if (radeon_dynclks) {
-			if (rdev->asic->set_clock_gating)
-				radeon_set_clock_gating(rdev, 1);
-		}
-	}
-	radeon_apply_clock_quirks(rdev);
-	return 0;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 885dcfac1838..bd74e428bd14 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -39,8 +39,8 @@
 
 /* from radeon_encoder.c */
 extern uint32_t
-radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device,
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
 			uint8_t dac);
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 
 /* from radeon_connector.c */
@@ -55,7 +55,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 
 /* from radeon_legacy_encoder.c */
 extern void
-radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id,
+radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
 			  uint32_t supported_device);
 
 /* old legacy ATI BIOS routines */
@@ -1505,7 +1505,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1520,7 +1520,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1535,7 +1535,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1550,12 +1550,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_1;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1571,7 +1571,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1588,7 +1588,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1607,7 +1607,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1619,7 +1619,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1631,7 +1631,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1648,7 +1648,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1660,12 +1660,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1680,7 +1680,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1697,7 +1697,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1709,12 +1709,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1728,7 +1728,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1745,7 +1745,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_LCD1_SUPPORT,
 								0),
 					  ATOM_DEVICE_LCD1_SUPPORT);
@@ -1757,7 +1757,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1769,7 +1769,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1786,12 +1786,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_2; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP2_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP2_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1806,7 +1806,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1823,12 +1823,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1842,7 +1842,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1859,7 +1859,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
 		hpd.hpd = RADEON_HPD_1; /* ??? */
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_DFP1_SUPPORT,
 								0),
 					  ATOM_DEVICE_DFP1_SUPPORT);
@@ -1871,7 +1871,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1883,7 +1883,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1900,7 +1900,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1912,7 +1912,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT2_SUPPORT,
 								2),
 					  ATOM_DEVICE_CRT2_SUPPORT);
@@ -1924,7 +1924,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c.valid = false;
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_TV1_SUPPORT,
 								2),
 					  ATOM_DEVICE_TV1_SUPPORT);
@@ -1941,7 +1941,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
 		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
 		hpd.hpd = RADEON_HPD_NONE;
 		radeon_add_legacy_encoder(dev,
-					  radeon_get_encoder_id(dev,
+					  radeon_get_encoder_enum(dev,
 								ATOM_DEVICE_CRT1_SUPPORT,
 								1),
 					  ATOM_DEVICE_CRT1_SUPPORT);
@@ -1952,7 +1952,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1952 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 1952 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
1953 hpd.hpd = RADEON_HPD_NONE; 1953 hpd.hpd = RADEON_HPD_NONE;
1954 radeon_add_legacy_encoder(dev, 1954 radeon_add_legacy_encoder(dev,
1955 radeon_get_encoder_id(dev, 1955 radeon_get_encoder_enum(dev,
1956 ATOM_DEVICE_CRT2_SUPPORT, 1956 ATOM_DEVICE_CRT2_SUPPORT,
1957 2), 1957 2),
1958 ATOM_DEVICE_CRT2_SUPPORT); 1958 ATOM_DEVICE_CRT2_SUPPORT);
@@ -2109,7 +2109,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2109 else 2109 else
2110 devices = ATOM_DEVICE_DFP1_SUPPORT; 2110 devices = ATOM_DEVICE_DFP1_SUPPORT;
2111 radeon_add_legacy_encoder(dev, 2111 radeon_add_legacy_encoder(dev,
2112 radeon_get_encoder_id 2112 radeon_get_encoder_enum
2113 (dev, devices, 0), 2113 (dev, devices, 0),
2114 devices); 2114 devices);
2115 radeon_add_legacy_connector(dev, i, devices, 2115 radeon_add_legacy_connector(dev, i, devices,
@@ -2123,7 +2123,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2123 if (tmp & 0x1) { 2123 if (tmp & 0x1) {
2124 devices = ATOM_DEVICE_CRT2_SUPPORT; 2124 devices = ATOM_DEVICE_CRT2_SUPPORT;
2125 radeon_add_legacy_encoder(dev, 2125 radeon_add_legacy_encoder(dev,
2126 radeon_get_encoder_id 2126 radeon_get_encoder_enum
2127 (dev, 2127 (dev,
2128 ATOM_DEVICE_CRT2_SUPPORT, 2128 ATOM_DEVICE_CRT2_SUPPORT,
2129 2), 2129 2),
@@ -2131,7 +2131,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2131 } else { 2131 } else {
2132 devices = ATOM_DEVICE_CRT1_SUPPORT; 2132 devices = ATOM_DEVICE_CRT1_SUPPORT;
2133 radeon_add_legacy_encoder(dev, 2133 radeon_add_legacy_encoder(dev,
2134 radeon_get_encoder_id 2134 radeon_get_encoder_enum
2135 (dev, 2135 (dev,
2136 ATOM_DEVICE_CRT1_SUPPORT, 2136 ATOM_DEVICE_CRT1_SUPPORT,
2137 1), 2137 1),
@@ -2151,7 +2151,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2151 if (tmp & 0x1) { 2151 if (tmp & 0x1) {
2152 devices |= ATOM_DEVICE_CRT2_SUPPORT; 2152 devices |= ATOM_DEVICE_CRT2_SUPPORT;
2153 radeon_add_legacy_encoder(dev, 2153 radeon_add_legacy_encoder(dev,
2154 radeon_get_encoder_id 2154 radeon_get_encoder_enum
2155 (dev, 2155 (dev,
2156 ATOM_DEVICE_CRT2_SUPPORT, 2156 ATOM_DEVICE_CRT2_SUPPORT,
2157 2), 2157 2),
@@ -2159,7 +2159,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2159 } else { 2159 } else {
2160 devices |= ATOM_DEVICE_CRT1_SUPPORT; 2160 devices |= ATOM_DEVICE_CRT1_SUPPORT;
2161 radeon_add_legacy_encoder(dev, 2161 radeon_add_legacy_encoder(dev,
2162 radeon_get_encoder_id 2162 radeon_get_encoder_enum
2163 (dev, 2163 (dev,
2164 ATOM_DEVICE_CRT1_SUPPORT, 2164 ATOM_DEVICE_CRT1_SUPPORT,
2165 1), 2165 1),
@@ -2168,7 +2168,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2168 if ((tmp >> 4) & 0x1) { 2168 if ((tmp >> 4) & 0x1) {
2169 devices |= ATOM_DEVICE_DFP2_SUPPORT; 2169 devices |= ATOM_DEVICE_DFP2_SUPPORT;
2170 radeon_add_legacy_encoder(dev, 2170 radeon_add_legacy_encoder(dev,
2171 radeon_get_encoder_id 2171 radeon_get_encoder_enum
2172 (dev, 2172 (dev,
2173 ATOM_DEVICE_DFP2_SUPPORT, 2173 ATOM_DEVICE_DFP2_SUPPORT,
2174 0), 2174 0),
@@ -2177,7 +2177,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2177 } else { 2177 } else {
2178 devices |= ATOM_DEVICE_DFP1_SUPPORT; 2178 devices |= ATOM_DEVICE_DFP1_SUPPORT;
2179 radeon_add_legacy_encoder(dev, 2179 radeon_add_legacy_encoder(dev,
2180 radeon_get_encoder_id 2180 radeon_get_encoder_enum
2181 (dev, 2181 (dev,
2182 ATOM_DEVICE_DFP1_SUPPORT, 2182 ATOM_DEVICE_DFP1_SUPPORT,
2183 0), 2183 0),
@@ -2202,7 +2202,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2202 connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; 2202 connector_object_id = CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
2203 } 2203 }
2204 radeon_add_legacy_encoder(dev, 2204 radeon_add_legacy_encoder(dev,
2205 radeon_get_encoder_id 2205 radeon_get_encoder_enum
2206 (dev, devices, 0), 2206 (dev, devices, 0),
2207 devices); 2207 devices);
2208 radeon_add_legacy_connector(dev, i, devices, 2208 radeon_add_legacy_connector(dev, i, devices,
@@ -2215,7 +2215,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2215 case CONNECTOR_CTV_LEGACY: 2215 case CONNECTOR_CTV_LEGACY:
2216 case CONNECTOR_STV_LEGACY: 2216 case CONNECTOR_STV_LEGACY:
2217 radeon_add_legacy_encoder(dev, 2217 radeon_add_legacy_encoder(dev,
2218 radeon_get_encoder_id 2218 radeon_get_encoder_enum
2219 (dev, 2219 (dev,
2220 ATOM_DEVICE_TV1_SUPPORT, 2220 ATOM_DEVICE_TV1_SUPPORT,
2221 2), 2221 2),
@@ -2242,12 +2242,12 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2242 DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); 2242 DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
2243 2243
2244 radeon_add_legacy_encoder(dev, 2244 radeon_add_legacy_encoder(dev,
2245 radeon_get_encoder_id(dev, 2245 radeon_get_encoder_enum(dev,
2246 ATOM_DEVICE_CRT1_SUPPORT, 2246 ATOM_DEVICE_CRT1_SUPPORT,
2247 1), 2247 1),
2248 ATOM_DEVICE_CRT1_SUPPORT); 2248 ATOM_DEVICE_CRT1_SUPPORT);
2249 radeon_add_legacy_encoder(dev, 2249 radeon_add_legacy_encoder(dev,
2250 radeon_get_encoder_id(dev, 2250 radeon_get_encoder_enum(dev,
2251 ATOM_DEVICE_DFP1_SUPPORT, 2251 ATOM_DEVICE_DFP1_SUPPORT,
2252 0), 2252 0),
2253 ATOM_DEVICE_DFP1_SUPPORT); 2253 ATOM_DEVICE_DFP1_SUPPORT);
@@ -2268,7 +2268,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2268 DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); 2268 DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
2269 if (crt_info) { 2269 if (crt_info) {
2270 radeon_add_legacy_encoder(dev, 2270 radeon_add_legacy_encoder(dev,
2271 radeon_get_encoder_id(dev, 2271 radeon_get_encoder_enum(dev,
2272 ATOM_DEVICE_CRT1_SUPPORT, 2272 ATOM_DEVICE_CRT1_SUPPORT,
2273 1), 2273 1),
2274 ATOM_DEVICE_CRT1_SUPPORT); 2274 ATOM_DEVICE_CRT1_SUPPORT);
@@ -2297,7 +2297,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2297 COMBIOS_LCD_DDC_INFO_TABLE); 2297 COMBIOS_LCD_DDC_INFO_TABLE);
2298 2298
2299 radeon_add_legacy_encoder(dev, 2299 radeon_add_legacy_encoder(dev,
2300 radeon_get_encoder_id(dev, 2300 radeon_get_encoder_enum(dev,
2301 ATOM_DEVICE_LCD1_SUPPORT, 2301 ATOM_DEVICE_LCD1_SUPPORT,
2302 0), 2302 0),
2303 ATOM_DEVICE_LCD1_SUPPORT); 2303 ATOM_DEVICE_LCD1_SUPPORT);
@@ -2351,7 +2351,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2351 hpd.hpd = RADEON_HPD_NONE; 2351 hpd.hpd = RADEON_HPD_NONE;
2352 ddc_i2c.valid = false; 2352 ddc_i2c.valid = false;
2353 radeon_add_legacy_encoder(dev, 2353 radeon_add_legacy_encoder(dev,
2354 radeon_get_encoder_id 2354 radeon_get_encoder_enum
2355 (dev, 2355 (dev,
2356 ATOM_DEVICE_TV1_SUPPORT, 2356 ATOM_DEVICE_TV1_SUPPORT,
2357 2), 2357 2),
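
All of the combios call sites above move from radeon_get_encoder_id() to radeon_get_encoder_enum(): the helper now returns a full object enum (object id plus instance number) rather than a bare object id, matching the ENCODER_*_ENUM_ID1 values it hands back later in this diff. A standalone sketch of how such a packed value decodes, using the OBJECT_ID_*/ENUM_ID_* mask names that appear further down; the literal mask values here are placeholders, not the real ATOM definitions:

#include <stdint.h>
#include <stdio.h>

#define OBJECT_ID_MASK  0x00ffu   /* placeholder, not the ATOM header value */
#define OBJECT_ID_SHIFT 0
#define ENUM_ID_MASK    0x0700u   /* placeholder, not the ATOM header value */
#define ENUM_ID_SHIFT   8

int main(void)
{
	uint32_t encoder_enum = (2u << ENUM_ID_SHIFT) | 0x21u; /* instance 2 of object 0x21 */
	uint32_t object_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	uint32_t enum_id = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

	printf("object id 0x%02x, instance %u\n", object_id, enum_id);
	return 0;
}
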
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 47c4b276d30c..a9dd7847d96e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -977,27 +977,29 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
977 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 977 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
978 enum drm_connector_status ret = connector_status_disconnected; 978 enum drm_connector_status ret = connector_status_disconnected;
979 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 979 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
980 u8 sink_type;
981 980
982 if (radeon_connector->edid) { 981 if (radeon_connector->edid) {
983 kfree(radeon_connector->edid); 982 kfree(radeon_connector->edid);
984 radeon_connector->edid = NULL; 983 radeon_connector->edid = NULL;
985 } 984 }
986 985
987 sink_type = radeon_dp_getsinktype(radeon_connector); 986 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
988 if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 987 /* eDP is always DP */
989 (sink_type == CONNECTOR_OBJECT_ID_eDP)) { 988 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
990 if (radeon_dp_getdpcd(radeon_connector)) { 989 if (radeon_dp_getdpcd(radeon_connector))
991 radeon_dig_connector->dp_sink_type = sink_type;
992 ret = connector_status_connected; 990 ret = connector_status_connected;
993 }
994 } else { 991 } else {
995 if (radeon_ddc_probe(radeon_connector)) { 992 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
996 radeon_dig_connector->dp_sink_type = sink_type; 993 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
997 ret = connector_status_connected; 994 if (radeon_dp_getdpcd(radeon_connector))
995 ret = connector_status_connected;
996 } else {
997 if (radeon_ddc_probe(radeon_connector))
998 ret = connector_status_connected;
998 } 999 }
999 } 1000 }
1000 1001
1002 radeon_connector_update_scratch_regs(connector, ret);
1001 return ret; 1003 return ret;
1002} 1004}
1003 1005
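
The reworked radeon_dp_detect() above treats eDP as DP unconditionally (skipping the sink-type query), probes the sink type only for other connectors, and now refreshes the scratch registers on every exit path. A compressed restatement of the branch structure, with plain booleans standing in for the radeon_dp_getdpcd() and radeon_ddc_probe() results:

#include <stdbool.h>

static bool detect_connected(bool is_edp, bool sink_is_dp,
			     bool dpcd_ok, bool ddc_ok)
{
	if (is_edp || sink_is_dp)
		return dpcd_ok;	/* DP path: a successful DPCD read decides */
	return ddc_ok;		/* non-DP sink behind the connector: DDC probe decides */
}
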
@@ -1037,7 +1039,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1037 uint32_t supported_device, 1039 uint32_t supported_device,
1038 int connector_type, 1040 int connector_type,
1039 struct radeon_i2c_bus_rec *i2c_bus, 1041 struct radeon_i2c_bus_rec *i2c_bus,
1040 bool linkb,
1041 uint32_t igp_lane_info, 1042 uint32_t igp_lane_info,
1042 uint16_t connector_object_id, 1043 uint16_t connector_object_id,
1043 struct radeon_hpd *hpd, 1044 struct radeon_hpd *hpd,
@@ -1050,10 +1051,16 @@ radeon_add_atom_connector(struct drm_device *dev,
1050 uint32_t subpixel_order = SubPixelNone; 1051 uint32_t subpixel_order = SubPixelNone;
1051 bool shared_ddc = false; 1052 bool shared_ddc = false;
1052 1053
1053 /* fixme - tv/cv/din */
1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1054 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1055 return; 1055 return;
1056 1056
1057 /* if the user selected tv=0 don't try and add the connector */
1058 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1059 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1060 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1061 (radeon_tv == 0))
1062 return;
1063
1057 /* see if we already added it */ 1064 /* see if we already added it */
1058 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1065 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1059 radeon_connector = to_radeon_connector(connector); 1066 radeon_connector = to_radeon_connector(connector);
@@ -1128,7 +1135,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1128 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1135 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1129 if (!radeon_dig_connector) 1136 if (!radeon_dig_connector)
1130 goto failed; 1137 goto failed;
1131 radeon_dig_connector->linkb = linkb;
1132 radeon_dig_connector->igp_lane_info = igp_lane_info; 1138 radeon_dig_connector->igp_lane_info = igp_lane_info;
1133 radeon_connector->con_priv = radeon_dig_connector; 1139 radeon_connector->con_priv = radeon_dig_connector;
1134 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1140 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1158,7 +1164,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1158 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1164 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1159 if (!radeon_dig_connector) 1165 if (!radeon_dig_connector)
1160 goto failed; 1166 goto failed;
1161 radeon_dig_connector->linkb = linkb;
1162 radeon_dig_connector->igp_lane_info = igp_lane_info; 1167 radeon_dig_connector->igp_lane_info = igp_lane_info;
1163 radeon_connector->con_priv = radeon_dig_connector; 1168 radeon_connector->con_priv = radeon_dig_connector;
1164 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1169 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
@@ -1182,7 +1187,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1182 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1187 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1183 if (!radeon_dig_connector) 1188 if (!radeon_dig_connector)
1184 goto failed; 1189 goto failed;
1185 radeon_dig_connector->linkb = linkb;
1186 radeon_dig_connector->igp_lane_info = igp_lane_info; 1190 radeon_dig_connector->igp_lane_info = igp_lane_info;
1187 radeon_connector->con_priv = radeon_dig_connector; 1191 radeon_connector->con_priv = radeon_dig_connector;
1188 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); 1192 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
@@ -1211,25 +1215,22 @@ radeon_add_atom_connector(struct drm_device *dev,
1211 case DRM_MODE_CONNECTOR_SVIDEO: 1215 case DRM_MODE_CONNECTOR_SVIDEO:
1212 case DRM_MODE_CONNECTOR_Composite: 1216 case DRM_MODE_CONNECTOR_Composite:
1213 case DRM_MODE_CONNECTOR_9PinDIN: 1217 case DRM_MODE_CONNECTOR_9PinDIN:
1214 if (radeon_tv == 1) { 1218 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1215 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1219 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1216 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1220 radeon_connector->dac_load_detect = true;
1217 radeon_connector->dac_load_detect = true; 1221 drm_connector_attach_property(&radeon_connector->base,
1218 drm_connector_attach_property(&radeon_connector->base, 1222 rdev->mode_info.load_detect_property,
1219 rdev->mode_info.load_detect_property, 1223 1);
1220 1); 1224 drm_connector_attach_property(&radeon_connector->base,
1221 drm_connector_attach_property(&radeon_connector->base, 1225 rdev->mode_info.tv_std_property,
1222 rdev->mode_info.tv_std_property, 1226 radeon_atombios_get_tv_info(rdev));
1223 radeon_atombios_get_tv_info(rdev)); 1227 /* no HPD on analog connectors */
1224 /* no HPD on analog connectors */ 1228 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1225 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1226 }
1227 break; 1229 break;
1228 case DRM_MODE_CONNECTOR_LVDS: 1230 case DRM_MODE_CONNECTOR_LVDS:
1229 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 1231 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
1230 if (!radeon_dig_connector) 1232 if (!radeon_dig_connector)
1231 goto failed; 1233 goto failed;
1232 radeon_dig_connector->linkb = linkb;
1233 radeon_dig_connector->igp_lane_info = igp_lane_info; 1234 radeon_dig_connector->igp_lane_info = igp_lane_info;
1234 radeon_connector->con_priv = radeon_dig_connector; 1235 radeon_connector->con_priv = radeon_dig_connector;
1235 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1236 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1275,10 +1276,16 @@ radeon_add_legacy_connector(struct drm_device *dev,
1275 struct radeon_connector *radeon_connector; 1276 struct radeon_connector *radeon_connector;
1276 uint32_t subpixel_order = SubPixelNone; 1277 uint32_t subpixel_order = SubPixelNone;
1277 1278
1278 /* fixme - tv/cv/din */
1279 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 1279 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
1280 return; 1280 return;
1281 1281
1282 /* if the user selected tv=0 don't try and add the connector */
1283 if (((connector_type == DRM_MODE_CONNECTOR_SVIDEO) ||
1284 (connector_type == DRM_MODE_CONNECTOR_Composite) ||
1285 (connector_type == DRM_MODE_CONNECTOR_9PinDIN)) &&
1286 (radeon_tv == 0))
1287 return;
1288
1282 /* see if we already added it */ 1289 /* see if we already added it */
1283 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1290 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1284 radeon_connector = to_radeon_connector(connector); 1291 radeon_connector = to_radeon_connector(connector);
@@ -1350,26 +1357,24 @@ radeon_add_legacy_connector(struct drm_device *dev,
1350 case DRM_MODE_CONNECTOR_SVIDEO: 1357 case DRM_MODE_CONNECTOR_SVIDEO:
1351 case DRM_MODE_CONNECTOR_Composite: 1358 case DRM_MODE_CONNECTOR_Composite:
1352 case DRM_MODE_CONNECTOR_9PinDIN: 1359 case DRM_MODE_CONNECTOR_9PinDIN:
1353 if (radeon_tv == 1) { 1360 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1354 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1361 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1355 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1362 radeon_connector->dac_load_detect = true;
1356 radeon_connector->dac_load_detect = true; 1363 /* RS400,RC410,RS480 chipset seems to report a lot
1357 /* RS400,RC410,RS480 chipset seems to report a lot 1364 * of false positive on load detect, we haven't yet
1358 * of false positive on load detect, we haven't yet 1365 * found a way to make load detect reliable on those
1359 * found a way to make load detect reliable on those 1366 * chipset, thus just disable it for TV.
1360 * chipset, thus just disable it for TV. 1367 */
1361 */ 1368 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1362 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1369 radeon_connector->dac_load_detect = false;
1363 radeon_connector->dac_load_detect = false; 1370 drm_connector_attach_property(&radeon_connector->base,
1364 drm_connector_attach_property(&radeon_connector->base, 1371 rdev->mode_info.load_detect_property,
1365 rdev->mode_info.load_detect_property, 1372 radeon_connector->dac_load_detect);
1366 radeon_connector->dac_load_detect); 1373 drm_connector_attach_property(&radeon_connector->base,
1367 drm_connector_attach_property(&radeon_connector->base, 1374 rdev->mode_info.tv_std_property,
1368 rdev->mode_info.tv_std_property, 1375 radeon_combios_get_tv_info(rdev));
1369 radeon_combios_get_tv_info(rdev)); 1376 /* no HPD on analog connectors */
1370 /* no HPD on analog connectors */ 1377 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1371 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
1372 }
1373 break; 1378 break;
1374 case DRM_MODE_CONNECTOR_LVDS: 1379 case DRM_MODE_CONNECTOR_LVDS:
1375 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1380 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
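
In both radeon_add_atom_connector() and radeon_add_legacy_connector() the per-case `if (radeon_tv == 1)` wrapper disappears; the check is hoisted into a shared early return, so with radeon.tv=0 the TV connector is simply never registered. A minimal sketch of the hoisted predicate (enum values stand in for the DRM_MODE_CONNECTOR_* constants):

#include <stdbool.h>

enum { CONNECTOR_SVIDEO, CONNECTOR_COMPOSITE, CONNECTOR_9PINDIN, CONNECTOR_VGA };

static bool tv_connector_disabled(int connector_type, int radeon_tv)
{
	return (connector_type == CONNECTOR_SVIDEO ||
		connector_type == CONNECTOR_COMPOSITE ||
		connector_type == CONNECTOR_9PINDIN) &&
	       radeon_tv == 0;
}
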
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4f7a170d1566..256d204a6d24 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
199 mc->mc_vram_size = mc->aper_size; 199 mc->mc_vram_size = mc->aper_size;
200 } 200 }
201 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 201 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
202 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { 202 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
203 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); 203 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
204 mc->real_vram_size = mc->aper_size; 204 mc->real_vram_size = mc->aper_size;
205 mc->mc_vram_size = mc->aper_size; 205 mc->mc_vram_size = mc->aper_size;
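
The one-token fix in radeon_vram_location() swaps `mc->vram_end` for `mc->vram_start` in the second clause, turning the AGP check into a proper interval-overlap test: a VRAM range that entirely covered the GTT range previously slipped past it. Up to the strictness of the boundaries, the fixed condition is the standard closed-interval overlap test:

#include <stdbool.h>
#include <stdint.h>

/* Two closed ranges [a_start, a_end] and [b_start, b_end] overlap iff: */
static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
			   uint64_t b_start, uint64_t b_end)
{
	return a_start <= b_end && b_start <= a_end;
}
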
@@ -293,30 +293,20 @@ bool radeon_card_posted(struct radeon_device *rdev)
293void radeon_update_bandwidth_info(struct radeon_device *rdev) 293void radeon_update_bandwidth_info(struct radeon_device *rdev)
294{ 294{
295 fixed20_12 a; 295 fixed20_12 a;
296 u32 sclk, mclk; 296 u32 sclk = rdev->pm.current_sclk;
297 u32 mclk = rdev->pm.current_mclk;
297 298
298 if (rdev->flags & RADEON_IS_IGP) { 299 /* sclk/mclk in Mhz */
299 sclk = radeon_get_engine_clock(rdev); 300 a.full = dfixed_const(100);
300 mclk = rdev->clock.default_mclk; 301 rdev->pm.sclk.full = dfixed_const(sclk);
301 302 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
302 a.full = dfixed_const(100); 303 rdev->pm.mclk.full = dfixed_const(mclk);
303 rdev->pm.sclk.full = dfixed_const(sclk); 304 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
304 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = dfixed_const(mclk);
306 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
307 305
306 if (rdev->flags & RADEON_IS_IGP) {
308 a.full = dfixed_const(16); 307 a.full = dfixed_const(16);
309 /* core_bandwidth = sclk(Mhz) * 16 */ 308 /* core_bandwidth = sclk(Mhz) * 16 */
310 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a); 309 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
311 } else {
312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev);
314
315 a.full = dfixed_const(100);
316 rdev->pm.sclk.full = dfixed_const(sclk);
317 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = dfixed_const(mclk);
319 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
320 } 310 }
321} 311}
322 312
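
radeon_update_bandwidth_info() previously re-queried the engine and memory clocks and duplicated the fixed-point conversion in both branches; it now reads the cached rdev->pm.current_sclk/current_mclk once, leaving only the IGP core-bandwidth computation conditional. Judging by the "sclk/mclk in Mhz" comment and the divide by 100, the cached values appear to be in 10 kHz units; a plain-float stand-in for the dfixed_* conversion under that assumption:

#include <stdio.h>

int main(void)
{
	unsigned int sclk_10khz = 68000;	/* e.g. a 680 MHz engine clock */
	double sclk_mhz = sclk_10khz / 100.0;	/* what dfixed_div(sclk, 100) computes */

	printf("sclk = %.1f MHz\n", sclk_mhz);
	return 0;
}
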
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 5764f4d3b4f1..6dd434ad2429 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1094,6 +1094,18 @@ void radeon_modeset_fini(struct radeon_device *rdev)
1094 radeon_i2c_fini(rdev); 1094 radeon_i2c_fini(rdev);
1095} 1095}
1096 1096
1097static bool is_hdtv_mode(struct drm_display_mode *mode)
1098{
1099 /* try and guess if this is a tv or a monitor */
1100 if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1101 (mode->vdisplay == 576) || /* 576p */
1102 (mode->vdisplay == 720) || /* 720p */
1103 (mode->vdisplay == 1080)) /* 1080p */
1104 return true;
1105 else
1106 return false;
1107}
1108
1097bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 1109bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1098 struct drm_display_mode *mode, 1110 struct drm_display_mode *mode,
1099 struct drm_display_mode *adjusted_mode) 1111 struct drm_display_mode *adjusted_mode)
@@ -1141,7 +1153,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1141 if (ASIC_IS_AVIVO(rdev) && 1153 if (ASIC_IS_AVIVO(rdev) &&
1142 ((radeon_encoder->underscan_type == UNDERSCAN_ON) || 1154 ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
1143 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && 1155 ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
1144 drm_detect_hdmi_monitor(radeon_connector->edid)))) { 1156 drm_detect_hdmi_monitor(radeon_connector->edid) &&
1157 is_hdtv_mode(mode)))) {
1145 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; 1158 radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
1146 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; 1159 radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
1147 radeon_crtc->rmx_type = RMX_FULL; 1160 radeon_crtc->rmx_type = RMX_FULL;
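
With is_hdtv_mode() added, UNDERSCAN_AUTO no longer kicks in for every HDMI monitor; borders are applied only when the sink is HDMI and the requested mode looks like a TV mode, while UNDERSCAN_ON remains unconditional. The decision condenses to the following, with the two booleans standing in for drm_detect_hdmi_monitor() and is_hdtv_mode():

#include <stdbool.h>

enum underscan { UNDERSCAN_OFF, UNDERSCAN_ON, UNDERSCAN_AUTO };

static bool apply_underscan_borders(enum underscan u, bool hdmi_sink, bool hdtv_mode)
{
	return u == UNDERSCAN_ON ||
	       (u == UNDERSCAN_AUTO && hdmi_sink && hdtv_mode);
}
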
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 263c8098d7dd..2c293e8304d6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -81,7 +81,7 @@ void radeon_setup_encoder_clones(struct drm_device *dev)
81} 81}
82 82
83uint32_t 83uint32_t
84radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) 84radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
85{ 85{
86 struct radeon_device *rdev = dev->dev_private; 86 struct radeon_device *rdev = dev->dev_private;
87 uint32_t ret = 0; 87 uint32_t ret = 0;
@@ -97,59 +97,59 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
97 if ((rdev->family == CHIP_RS300) || 97 if ((rdev->family == CHIP_RS300) ||
98 (rdev->family == CHIP_RS400) || 98 (rdev->family == CHIP_RS400) ||
99 (rdev->family == CHIP_RS480)) 99 (rdev->family == CHIP_RS480))
100 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; 100 ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
101 else if (ASIC_IS_AVIVO(rdev)) 101 else if (ASIC_IS_AVIVO(rdev))
102 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1; 102 ret = ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1;
103 else 103 else
104 ret = ENCODER_OBJECT_ID_INTERNAL_DAC1; 104 ret = ENCODER_INTERNAL_DAC1_ENUM_ID1;
105 break; 105 break;
106 case 2: /* dac b */ 106 case 2: /* dac b */
107 if (ASIC_IS_AVIVO(rdev)) 107 if (ASIC_IS_AVIVO(rdev))
108 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2; 108 ret = ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1;
109 else { 109 else {
110 /*if (rdev->family == CHIP_R200) 110 /*if (rdev->family == CHIP_R200)
111 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 111 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
112 else*/ 112 else*/
113 ret = ENCODER_OBJECT_ID_INTERNAL_DAC2; 113 ret = ENCODER_INTERNAL_DAC2_ENUM_ID1;
114 } 114 }
115 break; 115 break;
116 case 3: /* external dac */ 116 case 3: /* external dac */
117 if (ASIC_IS_AVIVO(rdev)) 117 if (ASIC_IS_AVIVO(rdev))
118 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; 118 ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
119 else 119 else
120 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 120 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
121 break; 121 break;
122 } 122 }
123 break; 123 break;
124 case ATOM_DEVICE_LCD1_SUPPORT: 124 case ATOM_DEVICE_LCD1_SUPPORT:
125 if (ASIC_IS_AVIVO(rdev)) 125 if (ASIC_IS_AVIVO(rdev))
126 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; 126 ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
127 else 127 else
128 ret = ENCODER_OBJECT_ID_INTERNAL_LVDS; 128 ret = ENCODER_INTERNAL_LVDS_ENUM_ID1;
129 break; 129 break;
130 case ATOM_DEVICE_DFP1_SUPPORT: 130 case ATOM_DEVICE_DFP1_SUPPORT:
131 if ((rdev->family == CHIP_RS300) || 131 if ((rdev->family == CHIP_RS300) ||
132 (rdev->family == CHIP_RS400) || 132 (rdev->family == CHIP_RS400) ||
133 (rdev->family == CHIP_RS480)) 133 (rdev->family == CHIP_RS480))
134 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 134 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
135 else if (ASIC_IS_AVIVO(rdev)) 135 else if (ASIC_IS_AVIVO(rdev))
136 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1; 136 ret = ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1;
137 else 137 else
138 ret = ENCODER_OBJECT_ID_INTERNAL_TMDS1; 138 ret = ENCODER_INTERNAL_TMDS1_ENUM_ID1;
139 break; 139 break;
140 case ATOM_DEVICE_LCD2_SUPPORT: 140 case ATOM_DEVICE_LCD2_SUPPORT:
141 case ATOM_DEVICE_DFP2_SUPPORT: 141 case ATOM_DEVICE_DFP2_SUPPORT:
142 if ((rdev->family == CHIP_RS600) || 142 if ((rdev->family == CHIP_RS600) ||
143 (rdev->family == CHIP_RS690) || 143 (rdev->family == CHIP_RS690) ||
144 (rdev->family == CHIP_RS740)) 144 (rdev->family == CHIP_RS740))
145 ret = ENCODER_OBJECT_ID_INTERNAL_DDI; 145 ret = ENCODER_INTERNAL_DDI_ENUM_ID1;
146 else if (ASIC_IS_AVIVO(rdev)) 146 else if (ASIC_IS_AVIVO(rdev))
147 ret = ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1; 147 ret = ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1;
148 else 148 else
149 ret = ENCODER_OBJECT_ID_INTERNAL_DVO1; 149 ret = ENCODER_INTERNAL_DVO1_ENUM_ID1;
150 break; 150 break;
151 case ATOM_DEVICE_DFP3_SUPPORT: 151 case ATOM_DEVICE_DFP3_SUPPORT:
152 ret = ENCODER_OBJECT_ID_INTERNAL_LVTM1; 152 ret = ENCODER_INTERNAL_LVTM1_ENUM_ID1;
153 break; 153 break;
154 } 154 }
155 155
@@ -228,32 +228,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
228 return NULL; 228 return NULL;
229} 229}
230 230
231static struct radeon_connector_atom_dig *
232radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
233{
234 struct drm_device *dev = encoder->dev;
235 struct radeon_device *rdev = dev->dev_private;
236 struct drm_connector *connector;
237 struct radeon_connector *radeon_connector;
238 struct radeon_connector_atom_dig *dig_connector;
239
240 if (!rdev->is_atom_bios)
241 return NULL;
242
243 connector = radeon_get_connector_for_encoder(encoder);
244 if (!connector)
245 return NULL;
246
247 radeon_connector = to_radeon_connector(connector);
248
249 if (!radeon_connector->con_priv)
250 return NULL;
251
252 dig_connector = radeon_connector->con_priv;
253
254 return dig_connector;
255}
256
257void radeon_panel_mode_fixup(struct drm_encoder *encoder, 231void radeon_panel_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *adjusted_mode) 232 struct drm_display_mode *adjusted_mode)
259{ 233{
@@ -512,14 +486,12 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
512 struct radeon_device *rdev = dev->dev_private; 486 struct radeon_device *rdev = dev->dev_private;
513 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 487 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
514 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 488 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
515 struct radeon_connector_atom_dig *dig_connector =
516 radeon_get_atom_connector_priv_from_encoder(encoder);
517 union lvds_encoder_control args; 489 union lvds_encoder_control args;
518 int index = 0; 490 int index = 0;
519 int hdmi_detected = 0; 491 int hdmi_detected = 0;
520 uint8_t frev, crev; 492 uint8_t frev, crev;
521 493
522 if (!dig || !dig_connector) 494 if (!dig)
523 return; 495 return;
524 496
525 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 497 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
@@ -562,7 +534,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
562 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) 534 if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
563 args.v1.ucMisc |= (1 << 1); 535 args.v1.ucMisc |= (1 << 1);
564 } else { 536 } else {
565 if (dig_connector->linkb) 537 if (dig->linkb)
566 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 538 args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
567 if (radeon_encoder->pixel_clock > 165000) 539 if (radeon_encoder->pixel_clock > 165000)
568 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; 540 args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -601,7 +573,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
601 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; 573 args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
602 } 574 }
603 } else { 575 } else {
604 if (dig_connector->linkb) 576 if (dig->linkb)
605 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; 577 args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
606 if (radeon_encoder->pixel_clock > 165000) 578 if (radeon_encoder->pixel_clock > 165000)
607 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; 579 args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
@@ -623,6 +595,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
623int 595int
624atombios_get_encoder_mode(struct drm_encoder *encoder) 596atombios_get_encoder_mode(struct drm_encoder *encoder)
625{ 597{
598 struct drm_device *dev = encoder->dev;
599 struct radeon_device *rdev = dev->dev_private;
626 struct drm_connector *connector; 600 struct drm_connector *connector;
627 struct radeon_connector *radeon_connector; 601 struct radeon_connector *radeon_connector;
628 struct radeon_connector_atom_dig *dig_connector; 602 struct radeon_connector_atom_dig *dig_connector;
@@ -636,9 +610,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
636 switch (connector->connector_type) { 610 switch (connector->connector_type) {
637 case DRM_MODE_CONNECTOR_DVII: 611 case DRM_MODE_CONNECTOR_DVII:
638 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 612 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
639 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 613 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
640 return ATOM_ENCODER_MODE_HDMI; 614 /* fix me */
641 else if (radeon_connector->use_digital) 615 if (ASIC_IS_DCE4(rdev))
616 return ATOM_ENCODER_MODE_DVI;
617 else
618 return ATOM_ENCODER_MODE_HDMI;
619 } else if (radeon_connector->use_digital)
642 return ATOM_ENCODER_MODE_DVI; 620 return ATOM_ENCODER_MODE_DVI;
643 else 621 else
644 return ATOM_ENCODER_MODE_CRT; 622 return ATOM_ENCODER_MODE_CRT;
@@ -646,9 +624,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
646 case DRM_MODE_CONNECTOR_DVID: 624 case DRM_MODE_CONNECTOR_DVID:
647 case DRM_MODE_CONNECTOR_HDMIA: 625 case DRM_MODE_CONNECTOR_HDMIA:
648 default: 626 default:
649 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 627 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
650 return ATOM_ENCODER_MODE_HDMI; 628 /* fix me */
651 else 629 if (ASIC_IS_DCE4(rdev))
630 return ATOM_ENCODER_MODE_DVI;
631 else
632 return ATOM_ENCODER_MODE_HDMI;
633 } else
652 return ATOM_ENCODER_MODE_DVI; 634 return ATOM_ENCODER_MODE_DVI;
653 break; 635 break;
654 case DRM_MODE_CONNECTOR_LVDS: 636 case DRM_MODE_CONNECTOR_LVDS:
@@ -660,9 +642,13 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
660 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 642 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
661 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 643 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
662 return ATOM_ENCODER_MODE_DP; 644 return ATOM_ENCODER_MODE_DP;
663 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) 645 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
664 return ATOM_ENCODER_MODE_HDMI; 646 /* fix me */
665 else 647 if (ASIC_IS_DCE4(rdev))
648 return ATOM_ENCODER_MODE_DVI;
649 else
650 return ATOM_ENCODER_MODE_HDMI;
651 } else
666 return ATOM_ENCODER_MODE_DVI; 652 return ATOM_ENCODER_MODE_DVI;
667 break; 653 break;
668 case DRM_MODE_CONNECTOR_DVIA: 654 case DRM_MODE_CONNECTOR_DVIA:
@@ -729,13 +715,24 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
729 struct radeon_device *rdev = dev->dev_private; 715 struct radeon_device *rdev = dev->dev_private;
730 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 716 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
731 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 717 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
732 struct radeon_connector_atom_dig *dig_connector = 718 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
733 radeon_get_atom_connector_priv_from_encoder(encoder);
734 union dig_encoder_control args; 719 union dig_encoder_control args;
735 int index = 0; 720 int index = 0;
736 uint8_t frev, crev; 721 uint8_t frev, crev;
722 int dp_clock = 0;
723 int dp_lane_count = 0;
724
725 if (connector) {
726 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
727 struct radeon_connector_atom_dig *dig_connector =
728 radeon_connector->con_priv;
737 729
738 if (!dig || !dig_connector) 730 dp_clock = dig_connector->dp_clock;
731 dp_lane_count = dig_connector->dp_lane_count;
732 }
733
734 /* no dig encoder assigned */
735 if (dig->dig_encoder == -1)
739 return; 736 return;
740 737
741 memset(&args, 0, sizeof(args)); 738 memset(&args, 0, sizeof(args));
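
atombios_dig_encoder_setup() (and the transmitter path below) stop reaching through a connector that may not exist: the DP clock and lane count start at safe zero defaults and are filled in only when a connector is attached, and the routine now returns early when no DIG encoder has been assigned (dig->dig_encoder == -1). The defaulting pattern in isolation:

#include <stddef.h>

struct dp_params { int clock; int lane_count; };

static struct dp_params dp_params_from_connector(const struct dp_params *conn)
{
	struct dp_params p = { 0, 0 };	/* safe defaults when no connector */

	if (conn)
		p = *conn;		/* override only when the source exists */
	return p;
}
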
@@ -757,9 +754,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
757 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 754 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
758 755
759 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { 756 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
760 if (dig_connector->dp_clock == 270000) 757 if (dp_clock == 270000)
761 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; 758 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
762 args.v1.ucLaneNum = dig_connector->dp_lane_count; 759 args.v1.ucLaneNum = dp_lane_count;
763 } else if (radeon_encoder->pixel_clock > 165000) 760 } else if (radeon_encoder->pixel_clock > 165000)
764 args.v1.ucLaneNum = 8; 761 args.v1.ucLaneNum = 8;
765 else 762 else
@@ -781,7 +778,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
781 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; 778 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
782 break; 779 break;
783 } 780 }
784 if (dig_connector->linkb) 781 if (dig->linkb)
785 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; 782 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
786 else 783 else
787 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; 784 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
@@ -804,38 +801,47 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
804 struct radeon_device *rdev = dev->dev_private; 801 struct radeon_device *rdev = dev->dev_private;
805 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 802 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
806 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 803 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
807 struct radeon_connector_atom_dig *dig_connector = 804 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
808 radeon_get_atom_connector_priv_from_encoder(encoder);
809 struct drm_connector *connector;
810 struct radeon_connector *radeon_connector;
811 union dig_transmitter_control args; 805 union dig_transmitter_control args;
812 int index = 0; 806 int index = 0;
813 uint8_t frev, crev; 807 uint8_t frev, crev;
814 bool is_dp = false; 808 bool is_dp = false;
815 int pll_id = 0; 809 int pll_id = 0;
810 int dp_clock = 0;
811 int dp_lane_count = 0;
812 int connector_object_id = 0;
813 int igp_lane_info = 0;
816 814
817 if (!dig || !dig_connector) 815 if (connector) {
818 return; 816 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
817 struct radeon_connector_atom_dig *dig_connector =
818 radeon_connector->con_priv;
819 819
820 connector = radeon_get_connector_for_encoder(encoder); 820 dp_clock = dig_connector->dp_clock;
821 radeon_connector = to_radeon_connector(connector); 821 dp_lane_count = dig_connector->dp_lane_count;
822 connector_object_id =
823 (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
824 igp_lane_info = dig_connector->igp_lane_info;
825 }
826
827 /* no dig encoder assigned */
828 if (dig->dig_encoder == -1)
829 return;
822 830
823 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 831 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
824 is_dp = true; 832 is_dp = true;
825 833
826 memset(&args, 0, sizeof(args)); 834 memset(&args, 0, sizeof(args));
827 835
828 if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) 836 switch (radeon_encoder->encoder_id) {
837 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
838 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
839 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
829 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); 840 index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
830 else { 841 break;
831 switch (radeon_encoder->encoder_id) { 842 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
832 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 843 index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
833 index = GetIndexIntoMasterTable(COMMAND, DIG1TransmitterControl); 844 break;
834 break;
835 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
836 index = GetIndexIntoMasterTable(COMMAND, DIG2TransmitterControl);
837 break;
838 }
839 } 845 }
840 846
841 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 847 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
@@ -843,14 +849,14 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
843 849
844 args.v1.ucAction = action; 850 args.v1.ucAction = action;
845 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 851 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
846 args.v1.usInitInfo = radeon_connector->connector_object_id; 852 args.v1.usInitInfo = connector_object_id;
847 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { 853 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
848 args.v1.asMode.ucLaneSel = lane_num; 854 args.v1.asMode.ucLaneSel = lane_num;
849 args.v1.asMode.ucLaneSet = lane_set; 855 args.v1.asMode.ucLaneSet = lane_set;
850 } else { 856 } else {
851 if (is_dp) 857 if (is_dp)
852 args.v1.usPixelClock = 858 args.v1.usPixelClock =
853 cpu_to_le16(dig_connector->dp_clock / 10); 859 cpu_to_le16(dp_clock / 10);
854 else if (radeon_encoder->pixel_clock > 165000) 860 else if (radeon_encoder->pixel_clock > 165000)
855 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 861 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
856 else 862 else
@@ -858,13 +864,13 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
858 } 864 }
859 if (ASIC_IS_DCE4(rdev)) { 865 if (ASIC_IS_DCE4(rdev)) {
860 if (is_dp) 866 if (is_dp)
861 args.v3.ucLaneNum = dig_connector->dp_lane_count; 867 args.v3.ucLaneNum = dp_lane_count;
862 else if (radeon_encoder->pixel_clock > 165000) 868 else if (radeon_encoder->pixel_clock > 165000)
863 args.v3.ucLaneNum = 8; 869 args.v3.ucLaneNum = 8;
864 else 870 else
865 args.v3.ucLaneNum = 4; 871 args.v3.ucLaneNum = 4;
866 872
867 if (dig_connector->linkb) { 873 if (dig->linkb) {
868 args.v3.acConfig.ucLinkSel = 1; 874 args.v3.acConfig.ucLinkSel = 1;
869 args.v3.acConfig.ucEncoderSel = 1; 875 args.v3.acConfig.ucEncoderSel = 1;
870 } 876 }
@@ -904,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
904 } 910 }
905 } else if (ASIC_IS_DCE32(rdev)) { 911 } else if (ASIC_IS_DCE32(rdev)) {
906 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 912 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
907 if (dig_connector->linkb) 913 if (dig->linkb)
908 args.v2.acConfig.ucLinkSel = 1; 914 args.v2.acConfig.ucLinkSel = 1;
909 915
910 switch (radeon_encoder->encoder_id) { 916 switch (radeon_encoder->encoder_id) {
@@ -938,23 +944,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
938 if ((rdev->flags & RADEON_IS_IGP) && 944 if ((rdev->flags & RADEON_IS_IGP) &&
939 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) { 945 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
940 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) { 946 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
941 if (dig_connector->igp_lane_info & 0x1) 947 if (igp_lane_info & 0x1)
942 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 948 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
943 else if (dig_connector->igp_lane_info & 0x2) 949 else if (igp_lane_info & 0x2)
944 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; 950 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
945 else if (dig_connector->igp_lane_info & 0x4) 951 else if (igp_lane_info & 0x4)
946 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; 952 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
947 else if (dig_connector->igp_lane_info & 0x8) 953 else if (igp_lane_info & 0x8)
948 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; 954 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
949 } else { 955 } else {
950 if (dig_connector->igp_lane_info & 0x3) 956 if (igp_lane_info & 0x3)
951 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 957 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
952 else if (dig_connector->igp_lane_info & 0xc) 958 else if (igp_lane_info & 0xc)
953 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 959 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
954 } 960 }
955 } 961 }
956 962
957 if (dig_connector->linkb) 963 if (dig->linkb)
958 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; 964 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
959 else 965 else
960 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; 966 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
@@ -1072,8 +1078,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1072 if (is_dig) { 1078 if (is_dig) {
1073 switch (mode) { 1079 switch (mode) {
1074 case DRM_MODE_DPMS_ON: 1080 case DRM_MODE_DPMS_ON:
1075 if (!ASIC_IS_DCE4(rdev)) 1081 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1076 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1077 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1082 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1078 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1083 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1079 1084
@@ -1085,8 +1090,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1085 case DRM_MODE_DPMS_STANDBY: 1090 case DRM_MODE_DPMS_STANDBY:
1086 case DRM_MODE_DPMS_SUSPEND: 1091 case DRM_MODE_DPMS_SUSPEND:
1087 case DRM_MODE_DPMS_OFF: 1092 case DRM_MODE_DPMS_OFF:
1088 if (!ASIC_IS_DCE4(rdev)) 1093 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1089 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1090 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1094 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1091 if (ASIC_IS_DCE4(rdev)) 1095 if (ASIC_IS_DCE4(rdev))
1092 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); 1096 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
@@ -1290,24 +1294,22 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1290 uint32_t dig_enc_in_use = 0; 1294 uint32_t dig_enc_in_use = 0;
1291 1295
1292 if (ASIC_IS_DCE4(rdev)) { 1296 if (ASIC_IS_DCE4(rdev)) {
1293 struct radeon_connector_atom_dig *dig_connector = 1297 dig = radeon_encoder->enc_priv;
1294 radeon_get_atom_connector_priv_from_encoder(encoder);
1295
1296 switch (radeon_encoder->encoder_id) { 1298 switch (radeon_encoder->encoder_id) {
1297 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 1299 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1298 if (dig_connector->linkb) 1300 if (dig->linkb)
1299 return 1; 1301 return 1;
1300 else 1302 else
1301 return 0; 1303 return 0;
1302 break; 1304 break;
1303 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1305 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1304 if (dig_connector->linkb) 1306 if (dig->linkb)
1305 return 3; 1307 return 3;
1306 else 1308 else
1307 return 2; 1309 return 2;
1308 break; 1310 break;
1309 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1311 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1310 if (dig_connector->linkb) 1312 if (dig->linkb)
1311 return 5; 1313 return 5;
1312 else 1314 else
1313 return 4; 1315 return 4;
@@ -1641,6 +1643,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1641struct radeon_encoder_atom_dig * 1643struct radeon_encoder_atom_dig *
1642radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) 1644radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1643{ 1645{
1646 int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1644 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); 1647 struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1645 1648
1646 if (!dig) 1649 if (!dig)
@@ -1650,11 +1653,16 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1650 dig->coherent_mode = true; 1653 dig->coherent_mode = true;
1651 dig->dig_encoder = -1; 1654 dig->dig_encoder = -1;
1652 1655
1656 if (encoder_enum == 2)
1657 dig->linkb = true;
1658 else
1659 dig->linkb = false;
1660
1653 return dig; 1661 return dig;
1654} 1662}
1655 1663
1656void 1664void
1657radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1665radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
1658{ 1666{
1659 struct radeon_device *rdev = dev->dev_private; 1667 struct radeon_device *rdev = dev->dev_private;
1660 struct drm_encoder *encoder; 1668 struct drm_encoder *encoder;
@@ -1663,7 +1671,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1663 /* see if we already added it */ 1671 /* see if we already added it */
1664 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1672 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1665 radeon_encoder = to_radeon_encoder(encoder); 1673 radeon_encoder = to_radeon_encoder(encoder);
1666 if (radeon_encoder->encoder_id == encoder_id) { 1674 if (radeon_encoder->encoder_enum == encoder_enum) {
1667 radeon_encoder->devices |= supported_device; 1675 radeon_encoder->devices |= supported_device;
1668 return; 1676 return;
1669 } 1677 }
@@ -1691,7 +1699,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1691 1699
1692 radeon_encoder->enc_priv = NULL; 1700 radeon_encoder->enc_priv = NULL;
1693 1701
1694 radeon_encoder->encoder_id = encoder_id; 1702 radeon_encoder->encoder_enum = encoder_enum;
1703 radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1695 radeon_encoder->devices = supported_device; 1704 radeon_encoder->devices = supported_device;
1696 radeon_encoder->rmx_type = RMX_OFF; 1705 radeon_encoder->rmx_type = RMX_OFF;
1697 radeon_encoder->underscan_type = UNDERSCAN_OFF; 1706 radeon_encoder->underscan_type = UNDERSCAN_OFF;
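
radeon_add_atom_encoder() now keys duplicate detection on the full encoder_enum and derives encoder_id from it, and radeon_atombios_set_dig_info() sets linkb from the enum's instance field (instance 2 means link B in this diff) instead of copying it from the connector. Both derivations in one sketch, again with placeholder mask values rather than the real ATOM definitions:

#include <stdbool.h>
#include <stdint.h>

#define OBJECT_ID_MASK  0x00ffu   /* placeholder, not the ATOM header value */
#define OBJECT_ID_SHIFT 0
#define ENUM_ID_MASK    0x0700u   /* placeholder, not the ATOM header value */
#define ENUM_ID_SHIFT   8

struct encoder_state {
	uint32_t encoder_enum;
	uint32_t encoder_id;
	bool linkb;
};

static void encoder_state_init(struct encoder_state *e, uint32_t encoder_enum)
{
	e->encoder_enum = encoder_enum;
	e->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	e->linkb = ((encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT) == 2;
}
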
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dbf86962bdd1..c74a8b20d941 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -118,7 +118,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
118 aligned_size = ALIGN(size, PAGE_SIZE); 118 aligned_size = ALIGN(size, PAGE_SIZE);
119 ret = radeon_gem_object_create(rdev, aligned_size, 0, 119 ret = radeon_gem_object_create(rdev, aligned_size, 0,
120 RADEON_GEM_DOMAIN_VRAM, 120 RADEON_GEM_DOMAIN_VRAM,
121 false, ttm_bo_type_kernel, 121 false, true,
122 &gobj); 122 &gobj);
123 if (ret) { 123 if (ret) {
124 printk(KERN_ERR "failed to allocate framebuffer (%d)\n", 124 printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index bfd2ce5f5372..6a13ee38a5b9 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -99,6 +99,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
99 } 99 }
100 } 100 }
101 101
102 /* switch the pads to ddc mode */
103 if (ASIC_IS_DCE3(rdev) && rec->hw_capable) {
104 temp = RREG32(rec->mask_clk_reg);
105 temp &= ~(1 << 16);
106 WREG32(rec->mask_clk_reg, temp);
107 }
108
102 /* clear the output pin values */ 109 /* clear the output pin values */
103 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; 110 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
104 WREG32(rec->a_clk_reg, temp); 111 WREG32(rec->a_clk_reg, temp);
@@ -206,7 +213,7 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
206 213
207static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 214static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
208{ 215{
209 u32 sclk = radeon_get_engine_clock(rdev); 216 u32 sclk = rdev->pm.current_sclk;
210 u32 prescale = 0; 217 u32 prescale = 0;
211 u32 nm; 218 u32 nm;
212 u8 n, m, loop; 219 u8 n, m, loop;
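
Two i2c changes land above: radeon_get_i2c_prescale() now uses the cached rdev->pm.current_sclk instead of re-querying the engine clock, and on DCE3+ hardware-capable buses the pads are explicitly switched to DDC mode before a transfer by clearing bit 16 of the mask-clock register. The pad switch as a bare read-modify-write; the register accessors are stand-ins, the bit position comes from the hunk, and its meaning from the in-code comment:

#include <stdint.h>

static uint32_t fake_reg;	/* stand-in for MMIO space */
#define RREG32(r)    ((void)(r), fake_reg)
#define WREG32(r, v) ((void)(r), fake_reg = (v))

static void switch_pads_to_ddc(uint32_t mask_clk_reg)
{
	uint32_t tmp = RREG32(mask_clk_reg);

	tmp &= ~(1u << 16);	/* clear the pad-mode select bit */
	WREG32(mask_clk_reg, tmp);
}
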
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 059bfa4098d7..a108c7ed14f5 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
121 * chips. Disable MSI on them for now. 121 * chips. Disable MSI on them for now.
122 */ 122 */
123 if ((rdev->family >= CHIP_RV380) && 123 if ((rdev->family >= CHIP_RV380) &&
124 (!(rdev->flags & RADEON_IS_IGP))) { 124 (!(rdev->flags & RADEON_IS_IGP)) &&
125 (!(rdev->flags & RADEON_IS_AGP))) {
125 int ret = pci_enable_msi(rdev->pdev); 126 int ret = pci_enable_msi(rdev->pdev);
126 if (!ret) { 127 if (!ret) {
127 rdev->msi_enabled = 1; 128 rdev->msi_enabled = 1;
128 DRM_INFO("radeon: using MSI.\n"); 129 dev_info(rdev->dev, "radeon: using MSI.\n");
129 } 130 }
130 } 131 }
131 rdev->irq.installed = true; 132 rdev->irq.installed = true;
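
The MSI gate in radeon_irq_kms_init() now excludes AGP cards alongside IGPs, and the success message moves to dev_info() so it carries the device prefix. The gate condenses to the following (flag bits are placeholders):

#include <stdbool.h>
#include <stdint.h>

#define RADEON_IS_IGP (1u << 0)	/* placeholder flag bits */
#define RADEON_IS_AGP (1u << 1)

static bool should_try_msi(int family, int first_msi_family, uint32_t flags)
{
	return family >= first_msi_family &&
	       !(flags & (RADEON_IS_IGP | RADEON_IS_AGP));
}
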
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index b1c8ace5f080..5eee3c41d124 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -161,6 +161,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
161 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 161 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
162 return -EINVAL; 162 return -EINVAL;
163 } 163 }
164 break;
164 case RADEON_INFO_WANT_HYPERZ: 165 case RADEON_INFO_WANT_HYPERZ:
165 /* The "value" here is both an input and output parameter. 166 /* The "value" here is both an input and output parameter.
166 * If the input value is 1, filp requests hyper-z access. 167 * If the input value is 1, filp requests hyper-z access.
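The added "break;" closes a switch fallthrough: a tiling-config query on pre-r6xx parts correctly returned -EINVAL, but a valid query fell through into the hyper-z case. A tiny self-contained illustration of the failure mode (case values and returns invented):

    #include <stdio.h>

    static int handle(int cmd, int chip_family)
    {
        int ret = 0;

        switch (cmd) {
        case 1:                 /* tiling config: r6xx+ only */
            if (chip_family < 6)
                return -1;      /* -EINVAL in the driver */
            break;              /* without this, falls into case 2 */
        case 2:                 /* hyper-z request */
            ret = 2;
            break;
        }
        return ret;
    }

    int main(void)
    {
        printf("%d\n", handle(1, 7));   /* 0: valid tiling query */
        printf("%d\n", handle(1, 3));   /* -1: rejected, no fallthrough */
        return 0;
    }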
@@ -323,45 +324,45 @@ KMS_INVALID_IOCTL(radeon_surface_free_kms)
323 324
324 325
325struct drm_ioctl_desc radeon_ioctls_kms[] = { 326struct drm_ioctl_desc radeon_ioctls_kms[] = {
326 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 327 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
327 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 328 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
328 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 329 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
329 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 330 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
330 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH), 331 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
331 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH), 332 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
332 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH), 333 DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
333 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH), 334 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
334 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH), 335 DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
335 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH), 336 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
336 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH), 337 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
337 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH), 338 DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
338 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH), 339 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
339 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH), 340 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
340 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 341 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
341 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH), 342 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
342 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH), 343 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
343 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH), 344 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
344 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH), 345 DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
345 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH), 346 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
346 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free_kms, DRM_AUTH), 347 DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
347 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 348 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
348 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH), 349 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
349 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH), 350 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
350 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH), 351 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
351 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), 352 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
352 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), 353 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
353 /* KMS */ 354 /* KMS */
354 DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 355 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
355 DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), 356 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
356 DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), 357 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
357 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), 358 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
358 DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), 359 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
359 DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), 360 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
360 DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), 361 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
361 DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), 362 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
362 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), 363 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
363 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 364 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
364 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), 365 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
365 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 366 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
366}; 367};
367int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 368int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 989df519a1e4..305049afde15 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
272 if (!ref_div) 272 if (!ref_div)
273 return 1; 273 return 1;
274 274
275 vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; 275 vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div;
276 276
277 /* 277 /*
278 * This is horribly crude: the VCO frequency range is divided into 278 * This is horribly crude: the VCO frequency range is divided into
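The one-character radeon_legacy_crtc.c fix replaces a bitwise AND with the intended multiply in the VCO frequency computation. Plugging in plausible (invented) PLL values shows how far off the old expression was:

    #include <stdio.h>

    int main(void)
    {
        /* invented values: 27.00 MHz reference (units of 10 kHz),
         * feedback divider 160, reference divider 12 */
        unsigned ref_freq = 2700, fb_div = 160, ref_div = 12;

        printf("broken: %u\n", (ref_freq & fb_div) / ref_div); /* 10: AND garbage */
        printf("fixed:  %u\n", (ref_freq * fb_div) / ref_div); /* 36000 = 360 MHz */
        return 0;
    }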
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index b8149cbc0c70..0b8397000f4c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1345,7 +1345,7 @@ static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct ra
1345} 1345}
1346 1346
1347void 1347void
1348radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1348radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
1349{ 1349{
1350 struct radeon_device *rdev = dev->dev_private; 1350 struct radeon_device *rdev = dev->dev_private;
1351 struct drm_encoder *encoder; 1351 struct drm_encoder *encoder;
@@ -1354,7 +1354,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1354 /* see if we already added it */ 1354 /* see if we already added it */
1355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1355 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1356 radeon_encoder = to_radeon_encoder(encoder); 1356 radeon_encoder = to_radeon_encoder(encoder);
1357 if (radeon_encoder->encoder_id == encoder_id) { 1357 if (radeon_encoder->encoder_enum == encoder_enum) {
1358 radeon_encoder->devices |= supported_device; 1358 radeon_encoder->devices |= supported_device;
1359 return; 1359 return;
1360 } 1360 }
@@ -1374,7 +1374,8 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1374 1374
1375 radeon_encoder->enc_priv = NULL; 1375 radeon_encoder->enc_priv = NULL;
1376 1376
1377 radeon_encoder->encoder_id = encoder_id; 1377 radeon_encoder->encoder_enum = encoder_enum;
1378 radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
1378 radeon_encoder->devices = supported_device; 1379 radeon_encoder->devices = supported_device;
1379 radeon_encoder->rmx_type = RMX_OFF; 1380 radeon_encoder->rmx_type = RMX_OFF;
1380 1381
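radeon_add_legacy_encoder() now keys duplicate detection on the full encoder_enum and derives encoder_id from it with OBJECT_ID_MASK/OBJECT_ID_SHIFT, since two encoder instances can share one object ID. A sketch of that packed-field idea — the mask and shift values below are illustrative, not the ATOM BIOS ones:

    #include <stdint.h>
    #include <stdio.h>

    #define OBJECT_ID_MASK  0x00ff  /* assumption: low byte is the ID */
    #define OBJECT_ID_SHIFT 0
    #define ENUM_ID_SHIFT   8       /* assumption: instance lives higher up */

    int main(void)
    {
        uint32_t a = (1 << ENUM_ID_SHIFT) | 0x1e;   /* instance 1 of ID 0x1e */
        uint32_t b = (2 << ENUM_ID_SHIFT) | 0x1e;   /* instance 2, same ID */

        printf("same id:   %d\n",
               ((a & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT) ==
               ((b & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT));  /* 1 */
        printf("same enum: %d\n", a == b);                  /* 0 */
        return 0;
    }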
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5bbc086b9267..efbe975312dc 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -342,6 +342,7 @@ struct radeon_atom_ss {
342}; 342};
343 343
344struct radeon_encoder_atom_dig { 344struct radeon_encoder_atom_dig {
345 bool linkb;
345 /* atom dig */ 346 /* atom dig */
346 bool coherent_mode; 347 bool coherent_mode;
347 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */ 348 int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
@@ -360,6 +361,7 @@ struct radeon_encoder_atom_dac {
360 361
361struct radeon_encoder { 362struct radeon_encoder {
362 struct drm_encoder base; 363 struct drm_encoder base;
364 uint32_t encoder_enum;
363 uint32_t encoder_id; 365 uint32_t encoder_id;
364 uint32_t devices; 366 uint32_t devices;
365 uint32_t active_device; 367 uint32_t active_device;
@@ -378,7 +380,6 @@ struct radeon_encoder {
378 380
379struct radeon_connector_atom_dig { 381struct radeon_connector_atom_dig {
380 uint32_t igp_lane_info; 382 uint32_t igp_lane_info;
381 bool linkb;
382 /* displayport */ 383 /* displayport */
383 struct radeon_i2c_chan *dp_i2c_bus; 384 struct radeon_i2c_chan *dp_i2c_bus;
384 u8 dpcd[8]; 385 u8 dpcd[8];
@@ -599,7 +600,6 @@ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct d
599void radeon_enc_destroy(struct drm_encoder *encoder); 600void radeon_enc_destroy(struct drm_encoder *encoder);
600void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 601void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
601void radeon_combios_asic_init(struct drm_device *dev); 602void radeon_combios_asic_init(struct drm_device *dev);
602extern int radeon_static_clocks_init(struct drm_device *dev);
603bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 603bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
604 struct drm_display_mode *mode, 604 struct drm_display_mode *mode,
605 struct drm_display_mode *adjusted_mode); 605 struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 58038f5cab38..f87efec76236 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -226,6 +226,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
226{ 226{
227 int i; 227 int i;
228 228
229 /* no need to take locks, etc. if nothing's going to change */
230 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
231 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
232 return;
233
229 mutex_lock(&rdev->ddev->struct_mutex); 234 mutex_lock(&rdev->ddev->struct_mutex);
230 mutex_lock(&rdev->vram_mutex); 235 mutex_lock(&rdev->vram_mutex);
231 mutex_lock(&rdev->cp.mutex); 236 mutex_lock(&rdev->cp.mutex);
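radeon_pm_set_clocks() now bails out before its lock cascade when the requested state already matches the current one. A compact analogue of that fast path; the unlocked compare is assumed benign here, as in the driver, because a concurrent requester re-runs the update itself:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int current_state, requested_state;

    static void set_clocks(void)
    {
        if (requested_state == current_state)   /* no locks taken */
            return;

        pthread_mutex_lock(&lock);              /* the driver takes several */
        current_state = requested_state;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        requested_state = 1;
        set_clocks();
        printf("state = %d\n", current_state);
        return 0;
    }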
@@ -632,8 +637,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
632 } 637 }
633 638
634 radeon_hwmon_fini(rdev); 639 radeon_hwmon_fini(rdev);
635 if (rdev->pm.i2c_bus)
636 radeon_i2c_destroy(rdev->pm.i2c_bus);
637} 640}
638 641
639void radeon_pm_compute_clocks(struct radeon_device *rdev) 642void radeon_pm_compute_clocks(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index b3ba44c0a818..4ae5a3d1074e 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3228,34 +3228,34 @@ void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
3228} 3228}
3229 3229
3230struct drm_ioctl_desc radeon_ioctls[] = { 3230struct drm_ioctl_desc radeon_ioctls[] = {
3231 DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3231 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3232 DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3232 DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3233 DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3233 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3234 DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3234 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3235 DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH), 3235 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
3236 DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH), 3236 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
3237 DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH), 3237 DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset, DRM_AUTH),
3238 DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH), 3238 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
3239 DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH), 3239 DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
3240 DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH), 3240 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
3241 DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH), 3241 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
3242 DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH), 3242 DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
3243 DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH), 3243 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
3244 DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH), 3244 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
3245 DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3245 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3246 DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH), 3246 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
3247 DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH), 3247 DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
3248 DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH), 3248 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
3249 DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH), 3249 DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
3250 DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH), 3250 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
3251 DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH), 3251 DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free, DRM_AUTH),
3252 DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 3252 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
3253 DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH), 3253 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
3254 DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH), 3254 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
3255 DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH), 3255 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
3256 DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH), 3256 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
3257 DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH), 3257 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH),
3258 DRM_IOCTL_DEF(DRM_RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH) 3258 DRM_IOCTL_DEF_DRV(RADEON_CS, r600_cs_legacy_ioctl, DRM_AUTH)
3259}; 3259};
3260 3260
3261int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls); 3261int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index f1c796810117..bfa59db374d2 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -905,6 +905,54 @@ static void rv770_gpu_init(struct radeon_device *rdev)
905 905
906} 906}
907 907
908static int rv770_vram_scratch_init(struct radeon_device *rdev)
909{
910 int r;
911 u64 gpu_addr;
912
913 if (rdev->vram_scratch.robj == NULL) {
914 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
915 true, RADEON_GEM_DOMAIN_VRAM,
916 &rdev->vram_scratch.robj);
917 if (r) {
918 return r;
919 }
920 }
921
922 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
923 if (unlikely(r != 0))
924 return r;
925 r = radeon_bo_pin(rdev->vram_scratch.robj,
926 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
927 if (r) {
928 radeon_bo_unreserve(rdev->vram_scratch.robj);
929 return r;
930 }
931 r = radeon_bo_kmap(rdev->vram_scratch.robj,
932 (void **)&rdev->vram_scratch.ptr);
933 if (r)
934 radeon_bo_unpin(rdev->vram_scratch.robj);
935 radeon_bo_unreserve(rdev->vram_scratch.robj);
936
937 return r;
938}
939
940static void rv770_vram_scratch_fini(struct radeon_device *rdev)
941{
942 int r;
943
944 if (rdev->vram_scratch.robj == NULL) {
945 return;
946 }
947 r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
948 if (likely(r == 0)) {
949 radeon_bo_kunmap(rdev->vram_scratch.robj);
950 radeon_bo_unpin(rdev->vram_scratch.robj);
951 radeon_bo_unreserve(rdev->vram_scratch.robj);
952 }
953 radeon_bo_unref(&rdev->vram_scratch.robj);
954}
955
908int rv770_mc_init(struct radeon_device *rdev) 956int rv770_mc_init(struct radeon_device *rdev)
909{ 957{
910 u32 tmp; 958 u32 tmp;
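rv770_vram_scratch_init() follows the usual create/reserve/pin/kmap ladder, where each failure path releases exactly what was already acquired and the reservation is dropped on every path. A standalone sketch of that unwind discipline, with trivially-succeeding stubs standing in for the radeon_bo_* calls:

    #include <stdio.h>

    /* stubs for radeon_bo_reserve/_pin/_kmap (0 = success) and teardown */
    static int reserve(void) { return 0; }
    static int pin(void)     { return 0; }
    static int kmap(void)    { return 0; }
    static void unpin(void)     { puts("unpin"); }
    static void unreserve(void) { puts("unreserve"); }

    static int scratch_init(void)
    {
        int r = reserve();
        if (r)
            return r;
        r = pin();
        if (r) {
            unreserve();
            return r;
        }
        r = kmap();
        if (r)
            unpin();        /* keep the object, drop the pin */
        unreserve();        /* reservation dropped on every path */
        return r;
    }

    int main(void)
    {
        printf("init: %d\n", scratch_init());
        return 0;
    }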
@@ -970,6 +1018,9 @@ static int rv770_startup(struct radeon_device *rdev)
970 if (r) 1018 if (r)
971 return r; 1019 return r;
972 } 1020 }
1021 r = rv770_vram_scratch_init(rdev);
1022 if (r)
1023 return r;
973 rv770_gpu_init(rdev); 1024 rv770_gpu_init(rdev);
974 r = r600_blit_init(rdev); 1025 r = r600_blit_init(rdev);
975 if (r) { 1026 if (r) {
@@ -1023,11 +1074,6 @@ int rv770_resume(struct radeon_device *rdev)
1023 */ 1074 */
1024 /* post card */ 1075 /* post card */
1025 atom_asic_init(rdev->mode_info.atom_context); 1076 atom_asic_init(rdev->mode_info.atom_context);
1026 /* Initialize clocks */
1027 r = radeon_clocks_init(rdev);
1028 if (r) {
1029 return r;
1030 }
1031 1077
1032 r = rv770_startup(rdev); 1078 r = rv770_startup(rdev);
1033 if (r) { 1079 if (r) {
@@ -1118,9 +1164,6 @@ int rv770_init(struct radeon_device *rdev)
1118 radeon_surface_init(rdev); 1164 radeon_surface_init(rdev);
1119 /* Initialize clocks */ 1165 /* Initialize clocks */
1120 radeon_get_clock_info(rdev->ddev); 1166 radeon_get_clock_info(rdev->ddev);
1121 r = radeon_clocks_init(rdev);
1122 if (r)
1123 return r;
1124 /* Fence driver */ 1167 /* Fence driver */
1125 r = radeon_fence_driver_init(rdev); 1168 r = radeon_fence_driver_init(rdev);
1126 if (r) 1169 if (r)
@@ -1195,9 +1238,9 @@ void rv770_fini(struct radeon_device *rdev)
1195 r600_irq_fini(rdev); 1238 r600_irq_fini(rdev);
1196 radeon_irq_kms_fini(rdev); 1239 radeon_irq_kms_fini(rdev);
1197 rv770_pcie_gart_fini(rdev); 1240 rv770_pcie_gart_fini(rdev);
1241 rv770_vram_scratch_fini(rdev);
1198 radeon_gem_fini(rdev); 1242 radeon_gem_fini(rdev);
1199 radeon_fence_driver_fini(rdev); 1243 radeon_fence_driver_fini(rdev);
1200 radeon_clocks_fini(rdev);
1201 radeon_agp_fini(rdev); 1244 radeon_agp_fini(rdev);
1202 radeon_bo_fini(rdev); 1245 radeon_bo_fini(rdev);
1203 radeon_atombios_fini(rdev); 1246 radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 976dc8d25280..bf5f83ea14fe 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1082,10 +1082,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
1082} 1082}
1083 1083
1084struct drm_ioctl_desc savage_ioctls[] = { 1084struct drm_ioctl_desc savage_ioctls[] = {
1085 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1085 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1086 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), 1086 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
1087 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH), 1087 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
1088 DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH), 1088 DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
1089}; 1089};
1090 1090
1091int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); 1091int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 07d0f2979cac..7fe2b63412ce 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -320,12 +320,12 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
320} 320}
321 321
322struct drm_ioctl_desc sis_ioctls[] = { 322struct drm_ioctl_desc sis_ioctls[] = {
323 DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), 323 DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
324 DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), 324 DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
325 DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 325 DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
326 DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), 326 DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
327 DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), 327 DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
328 DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), 328 DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
329}; 329};
330 330
331int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); 331int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 68dda74a50ae..cc0ffa9abd00 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -722,20 +722,20 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
722} 722}
723 723
724struct drm_ioctl_desc via_ioctls[] = { 724struct drm_ioctl_desc via_ioctls[] = {
725 DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH), 725 DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
726 DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH), 726 DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
727 DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER), 727 DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
728 DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER), 728 DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
729 DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER), 729 DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
730 DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH), 730 DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
731 DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH), 731 DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
732 DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH), 732 DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
733 DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH), 733 DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
734 DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH), 734 DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
735 DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH), 735 DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
736 DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH), 736 DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
737 DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH), 737 DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
738 DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH) 738 DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
739}; 739};
740 740
741int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls); 741int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9dd395b90216..72ec2e2b6e97 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -99,47 +99,47 @@
99 */ 99 */
100 100
101#define VMW_IOCTL_DEF(ioctl, func, flags) \ 101#define VMW_IOCTL_DEF(ioctl, func, flags) \
102 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func} 102 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
103 103
104/** 104/**
105 * Ioctl definitions. 105 * Ioctl definitions.
106 */ 106 */
107 107
108static struct drm_ioctl_desc vmw_ioctls[] = { 108static struct drm_ioctl_desc vmw_ioctls[] = {
109 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 109 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
110 DRM_AUTH | DRM_UNLOCKED), 110 DRM_AUTH | DRM_UNLOCKED),
111 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 111 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
112 DRM_AUTH | DRM_UNLOCKED), 112 DRM_AUTH | DRM_UNLOCKED),
113 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 113 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
114 DRM_AUTH | DRM_UNLOCKED), 114 DRM_AUTH | DRM_UNLOCKED),
115 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, 115 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
116 vmw_kms_cursor_bypass_ioctl, 116 vmw_kms_cursor_bypass_ioctl,
117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118 118
119 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, 119 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
120 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 120 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
121 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 121 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
122 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 122 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 123 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
124 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), 124 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
125 125
126 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 126 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
127 DRM_AUTH | DRM_UNLOCKED), 127 DRM_AUTH | DRM_UNLOCKED),
128 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 128 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
129 DRM_AUTH | DRM_UNLOCKED), 129 DRM_AUTH | DRM_UNLOCKED),
130 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 130 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
131 DRM_AUTH | DRM_UNLOCKED), 131 DRM_AUTH | DRM_UNLOCKED),
132 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 132 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
133 DRM_AUTH | DRM_UNLOCKED), 133 DRM_AUTH | DRM_UNLOCKED),
134 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, 134 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
135 DRM_AUTH | DRM_UNLOCKED), 135 DRM_AUTH | DRM_UNLOCKED),
136 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, 136 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
137 DRM_AUTH | DRM_UNLOCKED), 137 DRM_AUTH | DRM_UNLOCKED),
138 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, 138 VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), 139 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
140 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, 140 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
141 DRM_AUTH | DRM_UNLOCKED), 141 DRM_AUTH | DRM_UNLOCKED),
142 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl, 142 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED) 143 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
144}; 144};
145 145
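The VMW_IOCTL_DEF rewrite, like the DRM_IOCTL_DEF_DRV conversions above, pastes one short token into both the command number and the table slot, so the two can never drift apart. A self-contained miniature of the idea, with invented command numbers:

    #include <stdio.h>

    #define CMD_BASE 0x40
    #define CMD_FOO  (CMD_BASE + 0)
    #define CMD_BAR  (CMD_BASE + 1)

    struct desc { int cmd; const char *name; };

    /* one token drives the slot index, the constant, and the name */
    #define IOCTL_DEF(io) [CMD_##io - CMD_BASE] = { CMD_##io, #io }

    static const struct desc table[] = {
        IOCTL_DEF(FOO),
        IOCTL_DEF(BAR),
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            printf("%#x %s\n", table[i].cmd, table[i].name);
        return 0;
    }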
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e635199a0cd2..0c52899be964 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1299,6 +1299,7 @@ static const struct hid_device_id hid_blacklist[] = {
1299 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, 1299 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
1300 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, 1300 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
1301 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, 1301 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
1302 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
1302 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 1303 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
1303 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1304 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
1304 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, 1305 { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index f44bdc084cb2..8ca7f65cf2f8 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -159,6 +159,13 @@ static int egalax_event(struct hid_device *hid, struct hid_field *field,
159{ 159{
160 struct egalax_data *td = hid_get_drvdata(hid); 160 struct egalax_data *td = hid_get_drvdata(hid);
161 161
162 /* Note, eGalax has two product lines: the first is resistive and
163 * uses a standard parallel multitouch protocol (product ID ==
164 * 48xx). The second is capacitive and uses an unusual "serial"
165 * protocol with a different message for each multitouch finger
166 * (product ID == 72xx). We do not yet generate a correct event
167 * sequence for the capacitive/serial protocol.
168 */
162 if (hid->claimed & HID_CLAIMED_INPUT) { 169 if (hid->claimed & HID_CLAIMED_INPUT) {
163 struct input_dev *input = field->hidinput->input; 170 struct input_dev *input = field->hidinput->input;
164 171
@@ -246,6 +253,8 @@ static void egalax_remove(struct hid_device *hdev)
246static const struct hid_device_id egalax_devices[] = { 253static const struct hid_device_id egalax_devices[] = {
247 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, 254 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
248 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, 255 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
256 { HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
257 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
249 { } 258 { }
250}; 259};
251MODULE_DEVICE_TABLE(hid, egalax_devices); 260MODULE_DEVICE_TABLE(hid, egalax_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d3fc13ae094d..85c6d13c9ffa 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -188,6 +188,7 @@
188#define USB_VENDOR_ID_DWAV 0x0eef 188#define USB_VENDOR_ID_DWAV 0x0eef
189#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001 189#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
190#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d 190#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
191#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
191 192
192#define USB_VENDOR_ID_ELECOM 0x056e 193#define USB_VENDOR_ID_ELECOM 0x056e
193#define USB_DEVICE_ID_ELECOM_BM084 0x0061 194#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 346f0e34987e..bc2e07740628 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -547,11 +547,11 @@ static void picolcd_fb_destroy(struct fb_info *info)
547 ref_cnt--; 547 ref_cnt--;
548 mutex_lock(&info->lock); 548 mutex_lock(&info->lock);
549 (*ref_cnt)--; 549 (*ref_cnt)--;
550 may_release = !ref_cnt; 550 may_release = !*ref_cnt;
551 mutex_unlock(&info->lock); 551 mutex_unlock(&info->lock);
552 if (may_release) { 552 if (may_release) {
553 framebuffer_release(info);
554 vfree((u8 *)info->fix.smem_start); 553 vfree((u8 *)info->fix.smem_start);
554 framebuffer_release(info);
555 } 555 }
556} 556}
557 557
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 254a003af048..0a29c51114aa 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -266,13 +266,15 @@ static int hiddev_open(struct inode *inode, struct file *file)
266{ 266{
267 struct hiddev_list *list; 267 struct hiddev_list *list;
268 struct usb_interface *intf; 268 struct usb_interface *intf;
269 struct hid_device *hid;
269 struct hiddev *hiddev; 270 struct hiddev *hiddev;
270 int res; 271 int res;
271 272
272 intf = usb_find_interface(&hiddev_driver, iminor(inode)); 273 intf = usb_find_interface(&hiddev_driver, iminor(inode));
273 if (!intf) 274 if (!intf)
274 return -ENODEV; 275 return -ENODEV;
275 hiddev = usb_get_intfdata(intf); 276 hid = usb_get_intfdata(intf);
277 hiddev = hid->hiddev;
276 278
277 if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL))) 279 if (!(list = kzalloc(sizeof(struct hiddev_list), GFP_KERNEL)))
278 return -ENOMEM; 280 return -ENOMEM;
@@ -587,7 +589,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
587 struct hiddev_list *list = file->private_data; 589 struct hiddev_list *list = file->private_data;
588 struct hiddev *hiddev = list->hiddev; 590 struct hiddev *hiddev = list->hiddev;
589 struct hid_device *hid = hiddev->hid; 591 struct hid_device *hid = hiddev->hid;
590 struct usb_device *dev = hid_to_usb_dev(hid); 592 struct usb_device *dev;
591 struct hiddev_collection_info cinfo; 593 struct hiddev_collection_info cinfo;
592 struct hiddev_report_info rinfo; 594 struct hiddev_report_info rinfo;
593 struct hiddev_field_info finfo; 595 struct hiddev_field_info finfo;
@@ -601,9 +603,11 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
601 /* Called without BKL by compat methods so no BKL taken */ 603 /* Called without BKL by compat methods so no BKL taken */
602 604
603 /* FIXME: Who or what stop this racing with a disconnect ?? */ 605 /* FIXME: Who or what stop this racing with a disconnect ?? */
604 if (!hiddev->exist) 606 if (!hiddev->exist || !hid)
605 return -EIO; 607 return -EIO;
606 608
609 dev = hid_to_usb_dev(hid);
610
607 switch (cmd) { 611 switch (cmd) {
608 612
609 case HIDIOCGVERSION: 613 case HIDIOCGVERSION:
@@ -888,7 +892,6 @@ int hiddev_connect(struct hid_device *hid, unsigned int force)
888 hid->hiddev = hiddev; 892 hid->hiddev = hiddev;
889 hiddev->hid = hid; 893 hiddev->hid = hid;
890 hiddev->exist = 1; 894 hiddev->exist = 1;
891 usb_set_intfdata(usbhid->intf, usbhid);
892 retval = usb_register_dev(usbhid->intf, &hiddev_class); 895 retval = usb_register_dev(usbhid->intf, &hiddev_class);
893 if (retval) { 896 if (retval) {
894 err_hid("Not able to get a minor for this device."); 897 err_hid("Not able to get a minor for this device.");
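The hiddev changes settle who owns the USB interface data: it now always points at the hid_device, and hiddev is reached through hid->hiddev rather than overwriting the pointer, so every consumer agrees on what intfdata holds. A sketch of that contract with the structs abbreviated:

    #include <stdio.h>

    struct hiddev { int exist; };
    struct hid_device { struct hiddev *hiddev; };

    static void *intfdata;          /* usb_set/get_intfdata stand-in */

    int main(void)
    {
        struct hiddev hd = { .exist = 1 };
        struct hid_device hid = { .hiddev = &hd };

        intfdata = &hid;            /* set once, by the hid core */

        struct hid_device *h = intfdata;    /* hiddev_open's view */
        printf("exist = %d\n", h->hiddev->exist);
        return 0;
    }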
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 0fba82943125..4d4d09bdec0a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -332,11 +332,11 @@ config SENSORS_F71805F
332 will be called f71805f. 332 will be called f71805f.
333 333
334config SENSORS_F71882FG 334config SENSORS_F71882FG
335 tristate "Fintek F71808E, F71858FG, F71862FG, F71882FG, F71889FG and F8000" 335 tristate "Fintek F71858FG, F71862FG, F71882FG, F71889FG and F8000"
336 depends on EXPERIMENTAL 336 depends on EXPERIMENTAL
337 help 337 help
338 If you say yes here you get support for hardware monitoring features 338 If you say yes here you get support for hardware monitoring
339 of the Fintek F71808E, F71858FG, F71862FG/71863FG, F71882FG/F71883FG, 339 features of the Fintek F71858FG, F71862FG/71863FG, F71882FG/F71883FG,
340 F71889FG and F8000 Super-I/O chips. 340 F71889FG and F8000 Super-I/O chips.
341 341
342 This driver can also be built as a module. If so, the module 342 This driver can also be built as a module. If so, the module
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index b300a2048af1..52319340e182 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = {
160 160
161static int __devinit ads7871_probe(struct spi_device *spi) 161static int __devinit ads7871_probe(struct spi_device *spi)
162{ 162{
163 int status, ret, err = 0; 163 int ret, err;
164 uint8_t val; 164 uint8_t val;
165 struct ads7871_data *pdata; 165 struct ads7871_data *pdata;
166 166
167 dev_dbg(&spi->dev, "probe\n"); 167 dev_dbg(&spi->dev, "probe\n");
168 168
169 pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
170 if (!pdata) {
171 err = -ENOMEM;
172 goto exit;
173 }
174
175 status = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
176 if (status < 0)
177 goto error_free;
178
179 pdata->hwmon_dev = hwmon_device_register(&spi->dev);
180 if (IS_ERR(pdata->hwmon_dev)) {
181 err = PTR_ERR(pdata->hwmon_dev);
182 goto error_remove;
183 }
184
185 spi_set_drvdata(spi, pdata);
186
187 /* Configure the SPI bus */ 169 /* Configure the SPI bus */
188 spi->mode = (SPI_MODE_0); 170 spi->mode = (SPI_MODE_0);
189 spi->bits_per_word = 8; 171 spi->bits_per_word = 8;
@@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi)
201 we need to make sure we really have a chip*/ 183 we need to make sure we really have a chip*/
202 if (val != ret) { 184 if (val != ret) {
203 err = -ENODEV; 185 err = -ENODEV;
186 goto exit;
187 }
188
189 pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL);
190 if (!pdata) {
191 err = -ENOMEM;
192 goto exit;
193 }
194
195 err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
196 if (err < 0)
197 goto error_free;
198
199 spi_set_drvdata(spi, pdata);
200
201 pdata->hwmon_dev = hwmon_device_register(&spi->dev);
202 if (IS_ERR(pdata->hwmon_dev)) {
203 err = PTR_ERR(pdata->hwmon_dev);
204 goto error_remove; 204 goto error_remove;
205 } 205 }
206 206
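The reordered ads7871_probe() verifies the chip answers on the SPI bus before allocating state or registering sysfs/hwmon nodes, so a failed detect has nothing to unwind. A hedged outline of that ordering, with detect() standing in for the driver's register read-back check:

    #include <stdio.h>
    #include <stdlib.h>

    static int detect(void) { return 1; }   /* 1 = chip answered */

    static int probe(void)
    {
        void *pdata;

        if (!detect())
            return -1;          /* -ENODEV, nothing allocated yet */

        pdata = calloc(1, 64);  /* only now commit resources */
        if (!pdata)
            return -2;          /* -ENOMEM */

        /* ... register sysfs group, then the hwmon device ... */
        free(pdata);
        return 0;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }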
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c070c9714cbe..de8111114f46 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -518,7 +518,6 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
518static int __init coretemp_init(void) 518static int __init coretemp_init(void)
519{ 519{
520 int i, err = -ENODEV; 520 int i, err = -ENODEV;
521 struct pdev_entry *p, *n;
522 521
523 /* quick check if we run Intel */ 522 /* quick check if we run Intel */
524 if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL) 523 if (cpu_data(0).x86_vendor != X86_VENDOR_INTEL)
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6207120dcd4d..537841ef44b9 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -45,7 +45,6 @@
45#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ 45#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
46 46
47#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ 47#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
48#define SIO_F71808_ID 0x0901 /* Chipset ID */
49#define SIO_F71858_ID 0x0507 /* Chipset ID */ 48#define SIO_F71858_ID 0x0507 /* Chipset ID */
50#define SIO_F71862_ID 0x0601 /* Chipset ID */ 49#define SIO_F71862_ID 0x0601 /* Chipset ID */
51#define SIO_F71882_ID 0x0541 /* Chipset ID */ 50#define SIO_F71882_ID 0x0541 /* Chipset ID */
@@ -97,10 +96,9 @@ static unsigned short force_id;
97module_param(force_id, ushort, 0); 96module_param(force_id, ushort, 0);
98MODULE_PARM_DESC(force_id, "Override the detected device ID"); 97MODULE_PARM_DESC(force_id, "Override the detected device ID");
99 98
100enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; 99enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
101 100
102static const char *f71882fg_names[] = { 101static const char *f71882fg_names[] = {
103 "f71808fg",
104 "f71858fg", 102 "f71858fg",
105 "f71862fg", 103 "f71862fg",
106 "f71882fg", 104 "f71882fg",
@@ -308,8 +306,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
308 SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), 306 SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
309}; 307};
310 308
311/* In attr common to the f71862fg, f71882fg and f71889fg */ 309/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
312static struct sensor_device_attribute_2 fxxxx_in_attr[] = { 310static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
313 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), 311 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
314 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), 312 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
315 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), 313 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
@@ -319,22 +317,6 @@ static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
319 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), 317 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
320 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), 318 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
321 SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), 319 SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
322};
323
324/* In attr for the f71808fg */
325static struct sensor_device_attribute_2 f71808_in_attr[] = {
326 SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
327 SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
328 SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
329 SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
330 SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
331 SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
332 SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 7),
333 SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 8),
334};
335
336/* Temp attr common to the f71808fg, f71862fg, f71882fg and f71889fg */
337static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
338 SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), 320 SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
339 SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, 321 SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
340 store_temp_max, 0, 1), 322 store_temp_max, 0, 1),
@@ -373,10 +355,6 @@ static struct sensor_device_attribute_2 fxxxx_temp_attr[] = {
373 store_temp_beep, 0, 6), 355 store_temp_beep, 0, 6),
374 SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), 356 SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
375 SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), 357 SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
376};
377
378/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
379static struct sensor_device_attribute_2 f71862_temp_attr[] = {
380 SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), 358 SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
381 SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, 359 SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
382 store_temp_max, 0, 3), 360 store_temp_max, 0, 3),
@@ -1011,11 +989,6 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
1011 data->temp_type[1] = 6; 989 data->temp_type[1] = 6;
1012 break; 990 break;
1013 } 991 }
1014 } else if (data->type == f71808fg) {
1015 reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
1016 data->temp_type[1] = (reg & 0x02) ? 2 : 4;
1017 data->temp_type[2] = (reg & 0x04) ? 2 : 4;
1018
1019 } else { 992 } else {
1020 reg2 = f71882fg_read8(data, F71882FG_REG_PECI); 993 reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
1021 if ((reg2 & 0x03) == 0x01) 994 if ((reg2 & 0x03) == 0x01)
@@ -1898,8 +1871,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
1898 1871
1899 val /= 1000; 1872 val /= 1000;
1900 1873
1901 if (data->type == f71889fg 1874 if (data->type == f71889fg)
1902 || data->type == f71808fg)
1903 val = SENSORS_LIMIT(val, -128, 127); 1875 val = SENSORS_LIMIT(val, -128, 127);
1904 else 1876 else
1905 val = SENSORS_LIMIT(val, 0, 127); 1877 val = SENSORS_LIMIT(val, 0, 127);
@@ -2002,28 +1974,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2002 /* fall through! */ 1974 /* fall through! */
2003 case f71862fg: 1975 case f71862fg:
2004 err = f71882fg_create_sysfs_files(pdev, 1976 err = f71882fg_create_sysfs_files(pdev,
2005 f71862_temp_attr, 1977 fxxxx_in_temp_attr,
2006 ARRAY_SIZE(f71862_temp_attr)); 1978 ARRAY_SIZE(fxxxx_in_temp_attr));
2007 if (err)
2008 goto exit_unregister_sysfs;
2009 err = f71882fg_create_sysfs_files(pdev,
2010 fxxxx_in_attr,
2011 ARRAY_SIZE(fxxxx_in_attr));
2012 if (err)
2013 goto exit_unregister_sysfs;
2014 err = f71882fg_create_sysfs_files(pdev,
2015 fxxxx_temp_attr,
2016 ARRAY_SIZE(fxxxx_temp_attr));
2017 break;
2018 case f71808fg:
2019 err = f71882fg_create_sysfs_files(pdev,
2020 f71808_in_attr,
2021 ARRAY_SIZE(f71808_in_attr));
2022 if (err)
2023 goto exit_unregister_sysfs;
2024 err = f71882fg_create_sysfs_files(pdev,
2025 fxxxx_temp_attr,
2026 ARRAY_SIZE(fxxxx_temp_attr));
2027 break; 1979 break;
2028 case f8000: 1980 case f8000:
2029 err = f71882fg_create_sysfs_files(pdev, 1981 err = f71882fg_create_sysfs_files(pdev,
@@ -2050,7 +2002,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2050 case f71862fg: 2002 case f71862fg:
2051 err = (data->pwm_enable & 0x15) != 0x15; 2003 err = (data->pwm_enable & 0x15) != 0x15;
2052 break; 2004 break;
2053 case f71808fg:
2054 case f71882fg: 2005 case f71882fg:
2055 case f71889fg: 2006 case f71889fg:
2056 err = 0; 2007 err = 0;
@@ -2096,7 +2047,6 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
2096 f8000_auto_pwm_attr, 2047 f8000_auto_pwm_attr,
2097 ARRAY_SIZE(f8000_auto_pwm_attr)); 2048 ARRAY_SIZE(f8000_auto_pwm_attr));
2098 break; 2049 break;
2099 case f71808fg:
2100 case f71889fg: 2050 case f71889fg:
2101 for (i = 0; i < nr_fans; i++) { 2051 for (i = 0; i < nr_fans; i++) {
2102 data->pwm_auto_point_mapping[i] = 2052 data->pwm_auto_point_mapping[i] =
@@ -2176,22 +2126,8 @@ static int f71882fg_remove(struct platform_device *pdev)
2176 /* fall through! */ 2126 /* fall through! */
2177 case f71862fg: 2127 case f71862fg:
2178 f71882fg_remove_sysfs_files(pdev, 2128 f71882fg_remove_sysfs_files(pdev,
2179 f71862_temp_attr, 2129 fxxxx_in_temp_attr,
2180 ARRAY_SIZE(f71862_temp_attr)); 2130 ARRAY_SIZE(fxxxx_in_temp_attr));
2181 f71882fg_remove_sysfs_files(pdev,
2182 fxxxx_in_attr,
2183 ARRAY_SIZE(fxxxx_in_attr));
2184 f71882fg_remove_sysfs_files(pdev,
2185 fxxxx_temp_attr,
2186 ARRAY_SIZE(fxxxx_temp_attr));
2187 break;
2188 case f71808fg:
2189 f71882fg_remove_sysfs_files(pdev,
2190 f71808_in_attr,
2191 ARRAY_SIZE(f71808_in_attr));
2192 f71882fg_remove_sysfs_files(pdev,
2193 fxxxx_temp_attr,
2194 ARRAY_SIZE(fxxxx_temp_attr));
2195 break; 2131 break;
2196 case f8000: 2132 case f8000:
2197 f71882fg_remove_sysfs_files(pdev, 2133 f71882fg_remove_sysfs_files(pdev,
@@ -2259,9 +2195,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
2259 2195
2260 devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); 2196 devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
2261 switch (devid) { 2197 switch (devid) {
2262 case SIO_F71808_ID:
2263 sio_data->type = f71808fg;
2264 break;
2265 case SIO_F71858_ID: 2198 case SIO_F71858_ID:
2266 sio_data->type = f71858fg; 2199 sio_data->type = f71858fg;
2267 break; 2200 break;
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index b9bb3e0ca530..39ead2a4d3c5 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = {
143 143
144MODULE_DEVICE_TABLE(pci, k8temp_ids); 144MODULE_DEVICE_TABLE(pci, k8temp_ids);
145 145
146static int __devinit is_rev_g_desktop(u8 model)
147{
148 u32 brandidx;
149
150 if (model < 0x69)
151 return 0;
152
153 if (model == 0xc1 || model == 0x6c || model == 0x7c)
154 return 0;
155
156 /*
157 * Differentiate between AM2 and ASB1.
158 * See "Constructing the processor Name String" in "Revision
159 * Guide for AMD NPT Family 0Fh Processors" (33610).
160 */
161 brandidx = cpuid_ebx(0x80000001);
162 brandidx = (brandidx >> 9) & 0x1f;
163
164 /* Single core */
165 if ((model == 0x6f || model == 0x7f) &&
166 (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
167 return 0;
168
169 /* Dual core */
170 if (model == 0x6b &&
171 (brandidx == 0xb || brandidx == 0xc))
172 return 0;
173
174 return 1;
175}
176
146static int __devinit k8temp_probe(struct pci_dev *pdev, 177static int __devinit k8temp_probe(struct pci_dev *pdev,
147 const struct pci_device_id *id) 178 const struct pci_device_id *id)
148{ 179{
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
179 "wrong - check erratum #141\n"); 210 "wrong - check erratum #141\n");
180 } 211 }
181 212
182 if ((model >= 0x69) && 213 if (is_rev_g_desktop(model)) {
183 !(model == 0xc1 || model == 0x6c || model == 0x7c ||
184 model == 0x6b || model == 0x6f || model == 0x7f)) {
185 /* 214 /*
186 * RevG desktop CPUs (i.e. no socket S1G1 or 215 * RevG desktop CPUs (i.e. no socket S1G1 or
187 * ASB1 parts) need additional offset, 216 * ASB1 parts) need additional offset,
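is_rev_g_desktop() separates AM2 desktop parts from ASB1 parts that share a model number by decoding the brand index from CPUID leaf 0x80000001 (EBX bits 13:9, per the comment in the diff). The same logic as a pure, testable function, taking the raw EBX value as a parameter instead of calling cpuid_ebx():

    #include <stdint.h>
    #include <stdio.h>

    static int is_rev_g_desktop(uint8_t model, uint32_t ebx)
    {
        uint32_t brandidx = (ebx >> 9) & 0x1f;

        if (model < 0x69)
            return 0;
        if (model == 0xc1 || model == 0x6c || model == 0x7c)
            return 0;
        if ((model == 0x6f || model == 0x7f) &&     /* single-core ASB1 */
            (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
            return 0;
        if (model == 0x6b &&                        /* dual-core ASB1 */
            (brandidx == 0xb || brandidx == 0xc))
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d %d\n", is_rev_g_desktop(0x6f, 0x7 << 9),
                          is_rev_g_desktop(0x6f, 0x1 << 9));    /* 0 1 */
        return 0;
    }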
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d0dc1db80b29..50815022cff1 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -1106,7 +1106,7 @@ static int ohci_iso_recv_init(struct hpsb_iso *iso)
1106 if (recv->block_irq_interval * 4 > iso->buf_packets) 1106 if (recv->block_irq_interval * 4 > iso->buf_packets)
1107 recv->block_irq_interval = iso->buf_packets / 4; 1107 recv->block_irq_interval = iso->buf_packets / 4;
1108 if (recv->block_irq_interval < 1) 1108 if (recv->block_irq_interval < 1)
1109 recv->block_irq_interval = 1; 1109 recv->block_irq_interval = 1;
1110 1110
1111 /* choose a buffer stride */ 1111 /* choose a buffer stride */
1112 /* must be a power of 2, and <= PAGE_SIZE */ 1112 /* must be a power of 2, and <= PAGE_SIZE */
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index dcc86b97a153..19fa94af207a 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -232,13 +232,13 @@ static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
232 if (absdev) { 232 if (absdev) {
233 val = lo + (hi << 8); 233 val = lo + (hi << 8);
234#ifdef TABLET_AUTOADJUST 234#ifdef TABLET_AUTOADJUST
235 if (val < input_abs_min(dev, ABS_X + i)) 235 if (val < input_abs_get_min(dev, ABS_X + i))
236 input_abs_set_min(dev, ABS_X + i, val); 236 input_abs_set_min(dev, ABS_X + i, val);
237 if (val > input_abs_max(dev, ABS_X + i)) 237 if (val > input_abs_get_max(dev, ABS_X + i))
238 input_abs_set_max(dev, ABS_X + i, val); 238 input_abs_set_max(dev, ABS_X + i, val);
239#endif 239#endif
240 if (i % 3) 240 if (i % 3)
241 val = input_abs_max(dev, ABS_X + i) - val; 241 val = input_abs_get_max(dev, ABS_X + i) - val;
242 input_report_abs(dev, ABS_X + i, val); 242 input_report_abs(dev, ABS_X + i, val);
243 } else { 243 } else {
244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8)); 244 val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
@@ -388,11 +388,11 @@ static void hil_dev_pointer_setup(struct hil_dev *ptr)
388 388
389#ifdef TABLET_AUTOADJUST 389#ifdef TABLET_AUTOADJUST
390 for (i = 0; i < ABS_MAX; i++) { 390 for (i = 0; i < ABS_MAX; i++) {
391 int diff = input_abs_max(input_dev, ABS_X + i) / 10; 391 int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
392 input_abs_set_min(input_dev, ABS_X + i, 392 input_abs_set_min(input_dev, ABS_X + i,
393 input_abs_min(input_dev, ABS_X + i) + diff) 393 input_abs_get_min(input_dev, ABS_X + i) + diff);
394 input_abs_set_max(input_dev, ABS_X + i, 394 input_abs_set_max(input_dev, ABS_X + i,
395 input_abs_max(input_dev, ABS_X + i) - diff) 395 input_abs_get_max(input_dev, ABS_X + i) - diff);
396 } 396 }
397#endif 397#endif
398 398
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 0e53b3bc39af..f32404f99189 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -567,8 +567,6 @@ static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
567 clk_put(keypad->clk); 567 clk_put(keypad->clk);
568 568
569 input_unregister_device(keypad->input_dev); 569 input_unregister_device(keypad->input_dev);
570 input_free_device(keypad->input_dev);
571
572 iounmap(keypad->mmio_base); 570 iounmap(keypad->mmio_base);
573 571
574 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 572 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index bb53fd33cd1c..0d4266a533a5 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -811,6 +811,8 @@ static struct miscdevice uinput_misc = {
811 .minor = UINPUT_MINOR, 811 .minor = UINPUT_MINOR,
812 .name = UINPUT_NAME, 812 .name = UINPUT_NAME,
813}; 813};
814MODULE_ALIAS_MISCDEV(UINPUT_MINOR);
815MODULE_ALIAS("devname:" UINPUT_NAME);
814 816
815static int __init uinput_init(void) 817static int __init uinput_init(void)
816{ 818{
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 83c24cca234a..d528a2dba064 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -138,8 +138,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
138 138
139 fx(0) = value; 139 fx(0) = value;
140 if (mousedev->touch && mousedev->pkt_count >= 2) { 140 if (mousedev->touch && mousedev->pkt_count >= 2) {
141 size = input_abs_get_min(dev, ABS_X) - 141 size = input_abs_get_max(dev, ABS_X) -
142 input_abs_get_max(dev, ABS_X); 142 input_abs_get_min(dev, ABS_X);
143 if (size == 0) 143 if (size == 0)
144 size = 256 * 2; 144 size = 256 * 2;
145 145
@@ -155,8 +155,8 @@ static void mousedev_touchpad_event(struct input_dev *dev,
155 fy(0) = value; 155 fy(0) = value;
156 if (mousedev->touch && mousedev->pkt_count >= 2) { 156 if (mousedev->touch && mousedev->pkt_count >= 2) {
157 /* use X size for ABS_Y to keep the same scale */ 157 /* use X size for ABS_Y to keep the same scale */
158 size = input_abs_get_min(dev, ABS_X) - 158 size = input_abs_get_max(dev, ABS_X) -
159 input_abs_get_max(dev, ABS_X); 159 input_abs_get_min(dev, ABS_X);
160 if (size == 0) 160 if (size == 0)
161 size = 256 * 2; 161 size = 256 * 2;
162 162
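The mousedev fix is the same shape as the hil_kbd one above: an axis range is max minus min, and with the operands swapped the computed touchpad size went negative, so the scaling code fell back to its 512-unit default. In one line, with invented axis bounds:

    #include <stdio.h>

    int main(void)
    {
        int min = 1472, max = 5472;

        printf("broken: %d\n", min - max);  /* -4000 */
        printf("fixed:  %d\n", max - min);  /*  4000 */
        return 0;
    }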
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 35bc2737412f..2d17e76066bd 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -45,6 +45,7 @@
45#include <linux/syscalls.h> 45#include <linux/syscalls.h>
46#include <linux/suspend.h> 46#include <linux/suspend.h>
47#include <linux/cpu.h> 47#include <linux/cpu.h>
48#include <linux/compat.h>
48#include <asm/prom.h> 49#include <asm/prom.h>
49#include <asm/machdep.h> 50#include <asm/machdep.h>
50#include <asm/io.h> 51#include <asm/io.h>
@@ -2349,11 +2350,52 @@ static long pmu_unlocked_ioctl(struct file *filp,
2349 return ret; 2350 return ret;
2350} 2351}
2351 2352
2353#ifdef CONFIG_COMPAT
2354#define PMU_IOC_GET_BACKLIGHT32 _IOR('B', 1, compat_size_t)
2355#define PMU_IOC_SET_BACKLIGHT32 _IOW('B', 2, compat_size_t)
2356#define PMU_IOC_GET_MODEL32 _IOR('B', 3, compat_size_t)
2357#define PMU_IOC_HAS_ADB32 _IOR('B', 4, compat_size_t)
2358#define PMU_IOC_CAN_SLEEP32 _IOR('B', 5, compat_size_t)
2359#define PMU_IOC_GRAB_BACKLIGHT32 _IOR('B', 6, compat_size_t)
2360
2361static long compat_pmu_ioctl (struct file *filp, u_int cmd, u_long arg)
2362{
2363 switch (cmd) {
2364 case PMU_IOC_SLEEP:
2365 break;
2366 case PMU_IOC_GET_BACKLIGHT32:
2367 cmd = PMU_IOC_GET_BACKLIGHT;
2368 break;
2369 case PMU_IOC_SET_BACKLIGHT32:
2370 cmd = PMU_IOC_SET_BACKLIGHT;
2371 break;
2372 case PMU_IOC_GET_MODEL32:
2373 cmd = PMU_IOC_GET_MODEL;
2374 break;
2375 case PMU_IOC_HAS_ADB32:
2376 cmd = PMU_IOC_HAS_ADB;
2377 break;
2378 case PMU_IOC_CAN_SLEEP32:
2379 cmd = PMU_IOC_CAN_SLEEP;
2380 break;
2381 case PMU_IOC_GRAB_BACKLIGHT32:
2382 cmd = PMU_IOC_GRAB_BACKLIGHT;
2383 break;
2384 default:
2385 return -ENOIOCTLCMD;
2386 }
2387 return pmu_unlocked_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2388}
2389#endif
2390
2352static const struct file_operations pmu_device_fops = { 2391static const struct file_operations pmu_device_fops = {
2353 .read = pmu_read, 2392 .read = pmu_read,
2354 .write = pmu_write, 2393 .write = pmu_write,
2355 .poll = pmu_fpoll, 2394 .poll = pmu_fpoll,
2356 .unlocked_ioctl = pmu_unlocked_ioctl, 2395 .unlocked_ioctl = pmu_unlocked_ioctl,
2396#ifdef CONFIG_COMPAT
2397 .compat_ioctl = compat_pmu_ioctl,
2398#endif
2357 .open = pmu_open, 2399 .open = pmu_open,
2358 .release = pmu_release, 2400 .release = pmu_release,
2359}; 2401};
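
[Note] The new compat_pmu_ioctl() exists because these PMU ioctls encode sizeof(size_t) in their command numbers, so a 32-bit process on a 64-bit kernel issues different command values than the native handler expects. The wrapper rewrites each 32-bit command to its native counterpart and widens the argument with compat_ptr() before reusing the regular unlocked_ioctl path. The same pattern in minimal, hypothetical form (the DEMO_* names and demo_unlocked_ioctl are placeholders):

    #ifdef CONFIG_COMPAT
    #include <linux/compat.h>
    #include <linux/fs.h>
    #include <linux/ioctl.h>

    #define DEMO_IOC_GET    _IOR('D', 1, size_t)         /* native encoding */
    #define DEMO_IOC_GET32  _IOR('D', 1, compat_size_t)  /* 32-bit user ABI */

    static long demo_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                    unsigned long arg);  /* assumed to exist */

    static long demo_compat_ioctl(struct file *filp, unsigned int cmd,
                                  unsigned long arg)
    {
            switch (cmd) {
            case DEMO_IOC_GET32:
                    cmd = DEMO_IOC_GET;     /* translate to native encoding */
                    break;
            default:
                    return -ENOIOCTLCMD;    /* unknown: let the core fail it */
            }
            /* 32-bit user pointers must be converted before reuse */
            return demo_unlocked_ioctl(filp, cmd,
                                       (unsigned long)compat_ptr(arg));
    }
    #endif
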
diff --git a/drivers/md/.gitignore b/drivers/md/.gitignore
deleted file mode 100644
index a7afec6b19c6..000000000000
--- a/drivers/md/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
1mktables
2raid6altivec*.c
3raid6int*.c
4raid6tables.c
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1ba1e122e948..ed4900ade93a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1542,8 +1542,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
1542 atomic_read(&bitmap->mddev->recovery_active) == 0); 1542 atomic_read(&bitmap->mddev->recovery_active) == 0);
1543 1543
1544 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync; 1544 bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
1545 if (bitmap->mddev->persistent) 1545 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1546 set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
1547 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1); 1546 sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
1548 s = 0; 1547 s = 0;
1549 while (s < sector && s < bitmap->mddev->resync_max_sectors) { 1548 while (s < sector && s < bitmap->mddev->resync_max_sectors) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 11567c7999a2..43cf9cc9c1df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2136,16 +2136,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
2136 * with the rest of the array) 2136 * with the rest of the array)
2137 */ 2137 */
2138 mdk_rdev_t *rdev; 2138 mdk_rdev_t *rdev;
2139
2140 /* First make sure individual recovery_offsets are correct */
2141 list_for_each_entry(rdev, &mddev->disks, same_set) {
2142 if (rdev->raid_disk >= 0 &&
2143 mddev->delta_disks >= 0 &&
2144 !test_bit(In_sync, &rdev->flags) &&
2145 mddev->curr_resync_completed > rdev->recovery_offset)
2146 rdev->recovery_offset = mddev->curr_resync_completed;
2147
2148 }
2149 list_for_each_entry(rdev, &mddev->disks, same_set) { 2139 list_for_each_entry(rdev, &mddev->disks, same_set) {
2150 if (rdev->sb_events == mddev->events || 2140 if (rdev->sb_events == mddev->events ||
2151 (nospares && 2141 (nospares &&
@@ -2167,13 +2157,27 @@ static void md_update_sb(mddev_t * mddev, int force_change)
2167 int sync_req; 2157 int sync_req;
2168 int nospares = 0; 2158 int nospares = 0;
2169 2159
2170 mddev->utime = get_seconds();
2171 if (mddev->external)
2172 return;
2173repeat: 2160repeat:
2161 /* First make sure individual recovery_offsets are correct */
2162 list_for_each_entry(rdev, &mddev->disks, same_set) {
2163 if (rdev->raid_disk >= 0 &&
2164 mddev->delta_disks >= 0 &&
2165 !test_bit(In_sync, &rdev->flags) &&
2166 mddev->curr_resync_completed > rdev->recovery_offset)
2167 rdev->recovery_offset = mddev->curr_resync_completed;
2168
2169 }
2170 if (!mddev->persistent) {
2171 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2172 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2173 wake_up(&mddev->sb_wait);
2174 return;
2175 }
2176
2174 spin_lock_irq(&mddev->write_lock); 2177 spin_lock_irq(&mddev->write_lock);
2175 2178
2176 set_bit(MD_CHANGE_PENDING, &mddev->flags); 2179 mddev->utime = get_seconds();
2180
2177 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2181 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2178 force_change = 1; 2182 force_change = 1;
2179 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2183 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
@@ -2221,19 +2225,6 @@ repeat:
2221 MD_BUG(); 2225 MD_BUG();
2222 mddev->events --; 2226 mddev->events --;
2223 } 2227 }
2224
2225 /*
2226 * do not write anything to disk if using
2227 * nonpersistent superblocks
2228 */
2229 if (!mddev->persistent) {
2230 if (!mddev->external)
2231 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2232
2233 spin_unlock_irq(&mddev->write_lock);
2234 wake_up(&mddev->sb_wait);
2235 return;
2236 }
2237 sync_sbs(mddev, nospares); 2228 sync_sbs(mddev, nospares);
2238 spin_unlock_irq(&mddev->write_lock); 2229 spin_unlock_irq(&mddev->write_lock);
2239 2230
@@ -3379,7 +3370,7 @@ array_state_show(mddev_t *mddev, char *page)
3379 case 0: 3370 case 0:
3380 if (mddev->in_sync) 3371 if (mddev->in_sync)
3381 st = clean; 3372 st = clean;
3382 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 3373 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3383 st = write_pending; 3374 st = write_pending;
3384 else if (mddev->safemode) 3375 else if (mddev->safemode)
3385 st = active_idle; 3376 st = active_idle;
@@ -3460,9 +3451,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3460 mddev->in_sync = 1; 3451 mddev->in_sync = 1;
3461 if (mddev->safemode == 1) 3452 if (mddev->safemode == 1)
3462 mddev->safemode = 0; 3453 mddev->safemode = 0;
3463 if (mddev->persistent) 3454 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3464 set_bit(MD_CHANGE_CLEAN,
3465 &mddev->flags);
3466 } 3455 }
3467 err = 0; 3456 err = 0;
3468 } else 3457 } else
@@ -3474,8 +3463,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
3474 case active: 3463 case active:
3475 if (mddev->pers) { 3464 if (mddev->pers) {
3476 restart_array(mddev); 3465 restart_array(mddev);
3477 if (mddev->external) 3466 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3478 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
3479 wake_up(&mddev->sb_wait); 3467 wake_up(&mddev->sb_wait);
3480 err = 0; 3468 err = 0;
3481 } else { 3469 } else {
@@ -6580,6 +6568,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6580 if (mddev->in_sync) { 6568 if (mddev->in_sync) {
6581 mddev->in_sync = 0; 6569 mddev->in_sync = 0;
6582 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6570 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6571 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6583 md_wakeup_thread(mddev->thread); 6572 md_wakeup_thread(mddev->thread);
6584 did_change = 1; 6573 did_change = 1;
6585 } 6574 }
@@ -6588,7 +6577,6 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
6588 if (did_change) 6577 if (did_change)
6589 sysfs_notify_dirent_safe(mddev->sysfs_state); 6578 sysfs_notify_dirent_safe(mddev->sysfs_state);
6590 wait_event(mddev->sb_wait, 6579 wait_event(mddev->sb_wait,
6591 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
6592 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6580 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6593} 6581}
6594 6582
@@ -6624,6 +6612,7 @@ int md_allow_write(mddev_t *mddev)
6624 if (mddev->in_sync) { 6612 if (mddev->in_sync) {
6625 mddev->in_sync = 0; 6613 mddev->in_sync = 0;
6626 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6614 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6615 set_bit(MD_CHANGE_PENDING, &mddev->flags);
6627 if (mddev->safemode_delay && 6616 if (mddev->safemode_delay &&
6628 mddev->safemode == 0) 6617 mddev->safemode == 0)
6629 mddev->safemode = 1; 6618 mddev->safemode = 1;
@@ -6633,7 +6622,7 @@ int md_allow_write(mddev_t *mddev)
6633 } else 6622 } else
6634 spin_unlock_irq(&mddev->write_lock); 6623 spin_unlock_irq(&mddev->write_lock);
6635 6624
6636 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 6625 if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6637 return -EAGAIN; 6626 return -EAGAIN;
6638 else 6627 else
6639 return 0; 6628 return 0;
@@ -6831,8 +6820,7 @@ void md_do_sync(mddev_t *mddev)
6831 atomic_read(&mddev->recovery_active) == 0); 6820 atomic_read(&mddev->recovery_active) == 0);
6832 mddev->curr_resync_completed = 6821 mddev->curr_resync_completed =
6833 mddev->curr_resync; 6822 mddev->curr_resync;
6834 if (mddev->persistent) 6823 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6835 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6836 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6824 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
6837 } 6825 }
6838 6826
@@ -7111,8 +7099,7 @@ void md_check_recovery(mddev_t *mddev)
7111 mddev->recovery_cp == MaxSector) { 7099 mddev->recovery_cp == MaxSector) {
7112 mddev->in_sync = 1; 7100 mddev->in_sync = 1;
7113 did_change = 1; 7101 did_change = 1;
7114 if (mddev->persistent) 7102 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7115 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7116 } 7103 }
7117 if (mddev->safemode == 1) 7104 if (mddev->safemode == 1)
7118 mddev->safemode = 0; 7105 mddev->safemode = 0;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index a953fe2808ae..3931299788dc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -140,7 +140,7 @@ struct mddev_s
140 unsigned long flags; 140 unsigned long flags;
141#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 141#define MD_CHANGE_DEVS 0 /* Some device status has changed */
142#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 142#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
143#define MD_CHANGE_PENDING 2 /* superblock update in progress */ 143#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
144 144
145 int suspended; 145 int suspended;
146 atomic_t active_io; 146 atomic_t active_io;
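
[Note] Taken together, the md hunks redefine the flag protocol: MD_CHANGE_CLEAN keeps meaning only "the clean/active state changed", while MD_CHANGE_PENDING now tracks a clean-to-active transition whose superblock write has not completed, and the non-persistent/external special cases move inside md_update_sb(), where both change bits are simply cleared. Writers therefore set both bits but wait only on MD_CHANGE_PENDING, roughly (a sketch assembled from the md_write_start() hunk above):

    spin_lock_irq(&mddev->write_lock);
    if (mddev->in_sync) {
            mddev->in_sync = 0;
            set_bit(MD_CHANGE_CLEAN, &mddev->flags);
            set_bit(MD_CHANGE_PENDING, &mddev->flags);
            md_wakeup_thread(mddev->thread);
    }
    spin_unlock_irq(&mddev->write_lock);

    /* block until the 'active' state has been pushed to the superblock */
    wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_PENDING, &mddev->flags));

This is also why array_state_show() now reports write_pending from MD_CHANGE_PENDING rather than MD_CHANGE_CLEAN.
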
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 73cc74ffc26b..ad83a4dcadc3 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -787,8 +787,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
787 struct bio_list bl; 787 struct bio_list bl;
788 struct page **behind_pages = NULL; 788 struct page **behind_pages = NULL;
789 const int rw = bio_data_dir(bio); 789 const int rw = bio_data_dir(bio);
790 const bool do_sync = (bio->bi_rw & REQ_SYNC); 790 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
791 bool do_barriers; 791 unsigned long do_barriers;
792 mdk_rdev_t *blocked_rdev; 792 mdk_rdev_t *blocked_rdev;
793 793
794 /* 794 /*
@@ -1120,6 +1120,8 @@ static int raid1_spare_active(mddev_t *mddev)
1120{ 1120{
1121 int i; 1121 int i;
1122 conf_t *conf = mddev->private; 1122 conf_t *conf = mddev->private;
1123 int count = 0;
1124 unsigned long flags;
1123 1125
1124 /* 1126 /*
1125 * Find all failed disks within the RAID1 configuration 1127 * Find all failed disks within the RAID1 configuration
@@ -1131,15 +1133,16 @@ static int raid1_spare_active(mddev_t *mddev)
1131 if (rdev 1133 if (rdev
1132 && !test_bit(Faulty, &rdev->flags) 1134 && !test_bit(Faulty, &rdev->flags)
1133 && !test_and_set_bit(In_sync, &rdev->flags)) { 1135 && !test_and_set_bit(In_sync, &rdev->flags)) {
1134 unsigned long flags; 1136 count++;
1135 spin_lock_irqsave(&conf->device_lock, flags); 1137 sysfs_notify_dirent(rdev->sysfs_state);
1136 mddev->degraded--;
1137 spin_unlock_irqrestore(&conf->device_lock, flags);
1138 } 1138 }
1139 } 1139 }
1140 spin_lock_irqsave(&conf->device_lock, flags);
1141 mddev->degraded -= count;
1142 spin_unlock_irqrestore(&conf->device_lock, flags);
1140 1143
1141 print_conf(conf); 1144 print_conf(conf);
1142 return 0; 1145 return count;
1143} 1146}
1144 1147
1145 1148
@@ -1640,7 +1643,7 @@ static void raid1d(mddev_t *mddev)
1640 * We already have a nr_pending reference on these rdevs. 1643 * We already have a nr_pending reference on these rdevs.
1641 */ 1644 */
1642 int i; 1645 int i;
1643 const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); 1646 const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
1644 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1647 clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
1645 clear_bit(R1BIO_Barrier, &r1_bio->state); 1648 clear_bit(R1BIO_Barrier, &r1_bio->state);
1646 for (i=0; i < conf->raid_disks; i++) 1649 for (i=0; i < conf->raid_disks; i++)
@@ -1696,7 +1699,7 @@ static void raid1d(mddev_t *mddev)
1696 (unsigned long long)r1_bio->sector); 1699 (unsigned long long)r1_bio->sector);
1697 raid_end_bio_io(r1_bio); 1700 raid_end_bio_io(r1_bio);
1698 } else { 1701 } else {
1699 const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; 1702 const unsigned long do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
1700 r1_bio->bios[r1_bio->read_disk] = 1703 r1_bio->bios[r1_bio->read_disk] =
1701 mddev->ro ? IO_BLOCKED : NULL; 1704 mddev->ro ? IO_BLOCKED : NULL;
1702 r1_bio->read_disk = disk; 1705 r1_bio->read_disk = disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a88aeb5198c7..84718383124d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -799,7 +799,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
799 int i; 799 int i;
800 int chunk_sects = conf->chunk_mask + 1; 800 int chunk_sects = conf->chunk_mask + 1;
801 const int rw = bio_data_dir(bio); 801 const int rw = bio_data_dir(bio);
802 const bool do_sync = (bio->bi_rw & REQ_SYNC); 802 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
803 struct bio_list bl; 803 struct bio_list bl;
804 unsigned long flags; 804 unsigned long flags;
805 mdk_rdev_t *blocked_rdev; 805 mdk_rdev_t *blocked_rdev;
@@ -1116,6 +1116,8 @@ static int raid10_spare_active(mddev_t *mddev)
1116 int i; 1116 int i;
1117 conf_t *conf = mddev->private; 1117 conf_t *conf = mddev->private;
1118 mirror_info_t *tmp; 1118 mirror_info_t *tmp;
1119 int count = 0;
1120 unsigned long flags;
1119 1121
1120 /* 1122 /*
1121 * Find all non-in_sync disks within the RAID10 configuration 1123 * Find all non-in_sync disks within the RAID10 configuration
@@ -1126,15 +1128,16 @@ static int raid10_spare_active(mddev_t *mddev)
1126 if (tmp->rdev 1128 if (tmp->rdev
1127 && !test_bit(Faulty, &tmp->rdev->flags) 1129 && !test_bit(Faulty, &tmp->rdev->flags)
1128 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 1130 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1129 unsigned long flags; 1131 count++;
1130 spin_lock_irqsave(&conf->device_lock, flags); 1132 sysfs_notify_dirent(tmp->rdev->sysfs_state);
1131 mddev->degraded--;
1132 spin_unlock_irqrestore(&conf->device_lock, flags);
1133 } 1133 }
1134 } 1134 }
1135 spin_lock_irqsave(&conf->device_lock, flags);
1136 mddev->degraded -= count;
1137 spin_unlock_irqrestore(&conf->device_lock, flags);
1135 1138
1136 print_conf(conf); 1139 print_conf(conf);
1137 return 0; 1140 return count;
1138} 1141}
1139 1142
1140 1143
@@ -1734,7 +1737,7 @@ static void raid10d(mddev_t *mddev)
1734 raid_end_bio_io(r10_bio); 1737 raid_end_bio_io(r10_bio);
1735 bio_put(bio); 1738 bio_put(bio);
1736 } else { 1739 } else {
1737 const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 1740 const unsigned long do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
1738 bio_put(bio); 1741 bio_put(bio);
1739 rdev = conf->mirrors[mirror].rdev; 1742 rdev = conf->mirrors[mirror].rdev;
1740 if (printk_ratelimit()) 1743 if (printk_ratelimit())
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 866d4b5a144c..69b0a169e43d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5330,6 +5330,8 @@ static int raid5_spare_active(mddev_t *mddev)
5330 int i; 5330 int i;
5331 raid5_conf_t *conf = mddev->private; 5331 raid5_conf_t *conf = mddev->private;
5332 struct disk_info *tmp; 5332 struct disk_info *tmp;
5333 int count = 0;
5334 unsigned long flags;
5333 5335
5334 for (i = 0; i < conf->raid_disks; i++) { 5336 for (i = 0; i < conf->raid_disks; i++) {
5335 tmp = conf->disks + i; 5337 tmp = conf->disks + i;
@@ -5337,14 +5339,15 @@ static int raid5_spare_active(mddev_t *mddev)
5337 && tmp->rdev->recovery_offset == MaxSector 5339 && tmp->rdev->recovery_offset == MaxSector
5338 && !test_bit(Faulty, &tmp->rdev->flags) 5340 && !test_bit(Faulty, &tmp->rdev->flags)
5339 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5341 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
5340 unsigned long flags; 5342 count++;
5341 spin_lock_irqsave(&conf->device_lock, flags); 5343 sysfs_notify_dirent(tmp->rdev->sysfs_state);
5342 mddev->degraded--;
5343 spin_unlock_irqrestore(&conf->device_lock, flags);
5344 } 5344 }
5345 } 5345 }
5346 spin_lock_irqsave(&conf->device_lock, flags);
5347 mddev->degraded -= count;
5348 spin_unlock_irqrestore(&conf->device_lock, flags);
5346 print_raid5_conf(conf); 5349 print_raid5_conf(conf);
5347 return 0; 5350 return count;
5348} 5351}
5349 5352
5350static int raid5_remove_disk(mddev_t *mddev, int number) 5353static int raid5_remove_disk(mddev_t *mddev, int number)
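
[Note] All three personalities (raid1, raid10, raid5) get the same two changes: ->degraded is now updated once, under device_lock, from a count accumulated outside the loop, instead of a lock/unlock round trip per revived device, and spare_active() returns that count so callers can tell whether anything actually came back In_sync. The shared shape, with a hypothetical became_in_sync() standing in for the per-personality rdev checks:

    static int demo_spare_active(mddev_t *mddev, conf_t *conf)
    {
            int i, count = 0;
            unsigned long flags;

            for (i = 0; i < conf->raid_disks; i++)
                    if (became_in_sync(conf, i))    /* hypothetical check */
                            count++;

            spin_lock_irqsave(&conf->device_lock, flags);
            mddev->degraded -= count;               /* one locked update */
            spin_unlock_irqrestore(&conf->device_lock, flags);

            return count;   /* callers learn how many spares activated */
    }
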
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
index decdeda840d0..fd0830ed10d8 100644
--- a/drivers/media/dvb/mantis/Kconfig
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -1,6 +1,6 @@
1config MANTIS_CORE 1config MANTIS_CORE
2 tristate "Mantis/Hopper PCI bridge based devices" 2 tristate "Mantis/Hopper PCI bridge based devices"
3 depends on PCI && I2C && INPUT 3 depends on PCI && I2C && INPUT && IR_CORE
4 4
5 help 5 help
6 Support for PCI cards based on the Mantis and Hopper PCi bridge. 6 Support for PCI cards based on the Mantis and Hopper PCi bridge.
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 0efe631e50ca..d80cfdc8edd2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -86,7 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
86 init_waitqueue_head(&host->wq); 86 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 87 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
88 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); 88 INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
89#ifdef CONFIG_PM
89 host->pm_notify.notifier_call = mmc_pm_notify; 90 host->pm_notify.notifier_call = mmc_pm_notify;
91#endif
90 92
91 /* 93 /*
92 * By default, hosts do not support SGIO or large requests. 94 * By default, hosts do not support SGIO or large requests.
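
[Note] struct mmc_host only has a pm_notify member when CONFIG_PM is enabled, so the unconditional assignment broke !CONFIG_PM builds; the fix keeps the initialisation behind the same symbol as the field. The generic shape of the idiom (demo names are placeholders):

    /*
     * Sketch: an optional member and every use of it guarded by one
     * Kconfig symbol, so both configurations compile.
     */
    struct demo_host {
    #ifdef CONFIG_PM
            struct notifier_block pm_notify;
    #endif
            /* ... unconditional members ... */
    };

    static void demo_host_init(struct demo_host *host)
    {
    #ifdef CONFIG_PM
            host->pm_notify.notifier_call = demo_pm_notify; /* placeholder */
    #endif
    }
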
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 283190bc2a40..68d12794cfd9 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -132,7 +132,7 @@ config MMC_SDHCI_CNS3XXX
132 132
133config MMC_SDHCI_S3C 133config MMC_SDHCI_S3C
134 tristate "SDHCI support on Samsung S3C SoC" 134 tristate "SDHCI support on Samsung S3C SoC"
135 depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX) 135 depends on MMC_SDHCI && PLAT_SAMSUNG
136 help 136 help
137 This selects the Secure Digital Host Controller Interface (SDHCI) 137 This selects the Secure Digital Host Controller Interface (SDHCI)
138 often referrered to as the HSMMC block in some of the Samsung S3C 138 often referrered to as the HSMMC block in some of the Samsung S3C
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0a7f2614c6f0..71ad4163b95e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -242,7 +242,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
242{ 242{
243 struct sdhci_host *host = platform_get_drvdata(dev); 243 struct sdhci_host *host = platform_get_drvdata(dev);
244 if (host) { 244 if (host) {
245 mutex_lock(&host->lock); 245 spin_lock(&host->lock);
246 if (state) { 246 if (state) {
247 dev_dbg(&dev->dev, "card inserted.\n"); 247 dev_dbg(&dev->dev, "card inserted.\n");
248 host->flags &= ~SDHCI_DEVICE_DEAD; 248 host->flags &= ~SDHCI_DEVICE_DEAD;
@@ -252,8 +252,8 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
252 host->flags |= SDHCI_DEVICE_DEAD; 252 host->flags |= SDHCI_DEVICE_DEAD;
253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; 253 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
254 } 254 }
255 sdhci_card_detect(host); 255 tasklet_schedule(&host->card_tasklet);
256 mutex_unlock(&host->lock); 256 spin_unlock(&host->lock);
257 } 257 }
258} 258}
259 259
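
[Note] host->lock in struct sdhci_host is a spinlock, not a mutex, so the mutex_lock()/mutex_unlock() calls here were wrong; the fix takes the spinlock and replaces the direct card-detect call with tasklet_schedule() on the core's existing card_tasklet, deferring the heavier detection work out of this context. The flip-flags-then-defer pattern in brief (a fragment, using the fields from the hunk above):

    spin_lock(&host->lock);
    if (inserted)
            host->flags &= ~SDHCI_DEVICE_DEAD;
    else
            host->flags |= SDHCI_DEVICE_DEAD;
    tasklet_schedule(&host->card_tasklet);  /* detection runs later */
    spin_unlock(&host->lock);
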
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 785512133b50..401527d273b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1180,7 +1180,8 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1180 else 1180 else
1181 ctrl &= ~SDHCI_CTRL_4BITBUS; 1181 ctrl &= ~SDHCI_CTRL_4BITBUS;
1182 1182
1183 if (ios->timing == MMC_TIMING_SD_HS) 1183 if (ios->timing == MMC_TIMING_SD_HS &&
1184 !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
1184 ctrl |= SDHCI_CTRL_HISPD; 1185 ctrl |= SDHCI_CTRL_HISPD;
1185 else 1186 else
1186 ctrl &= ~SDHCI_CTRL_HISPD; 1187 ctrl &= ~SDHCI_CTRL_HISPD;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 036cfae76368..d316bc79b636 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -245,6 +245,8 @@ struct sdhci_host {
245#define SDHCI_QUIRK_MISSING_CAPS (1<<27) 245#define SDHCI_QUIRK_MISSING_CAPS (1<<27)
246/* Controller uses Auto CMD12 command to stop the transfer */ 246/* Controller uses Auto CMD12 command to stop the transfer */
247#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) 247#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
248/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
249#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
248 250
249 int irq; /* Device IRQ */ 251 int irq; /* Device IRQ */
250 void __iomem * ioaddr; /* Mapped address */ 252 void __iomem * ioaddr; /* Mapped address */
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 00af55d7afba..fe63f6bd663c 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -22,6 +22,7 @@
22#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
23#include <linux/mtd/concat.h> 23#include <linux/mtd/concat.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_address.h>
25#include <linux/of_platform.h> 26#include <linux/of_platform.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
27 28
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a3c7473dd409..d551ddd9537a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2866,6 +2866,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2866 */ 2866 */
2867 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && 2867 if (id_data[0] == id_data[6] && id_data[1] == id_data[7] &&
2868 id_data[0] == NAND_MFR_SAMSUNG && 2868 id_data[0] == NAND_MFR_SAMSUNG &&
2869 (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
2869 id_data[5] != 0x00) { 2870 id_data[5] != 0x00) {
2870 /* Calc pagesize */ 2871 /* Calc pagesize */
2871 mtd->writesize = 2048 << (extid & 0x03); 2872 mtd->writesize = 2048 << (extid & 0x03);
@@ -2934,14 +2935,10 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2934 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1; 2935 chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)) + 32 - 1;
2935 2936
2936 /* Set the bad block position */ 2937 /* Set the bad block position */
2937 if (!(busw & NAND_BUSWIDTH_16) && (*maf_id == NAND_MFR_STMICRO || 2938 if (mtd->writesize > 512 || (busw & NAND_BUSWIDTH_16))
2938 (*maf_id == NAND_MFR_SAMSUNG &&
2939 mtd->writesize == 512) ||
2940 *maf_id == NAND_MFR_AMD))
2941 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
2942 else
2943 chip->badblockpos = NAND_LARGE_BADBLOCK_POS; 2939 chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
2944 2940 else
2941 chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
2945 2942
2946 /* Get chip options, preserve non chip based options */ 2943 /* Get chip options, preserve non chip based options */
2947 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2944 chip->options &= ~NAND_CHIPOPTIONS_MSK;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index e02fa4f0e3c9..4d89f3780207 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = {
363#define tAR_NDTR1(r) (((r) >> 0) & 0xf) 363#define tAR_NDTR1(r) (((r) >> 0) & 0xf)
364 364
365/* convert nano-seconds to nand flash controller clock cycles */ 365/* convert nano-seconds to nand flash controller clock cycles */
366#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) 366#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
367 367
368/* convert nand flash controller clock cycles to nano-seconds */ 368/* convert nand flash controller clock cycles to nano-seconds */
369#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) 369#define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000))
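
[Note] Dropping the "- 1" stops ns2cycle() from undershooting the requested timing. Since cycle2ns() above treats a register value c as (c + 1) cycles, the old macro's truncate-then-subtract could program a delay shorter than the datasheet minimum whenever the request was not an exact multiple of the clock period. A worked example, assuming that c-plus-one interpretation; this one is plain userspace C and runnable:

    #include <stdio.h>

    #define ns2cycle_old(ns, clk) (int)(((ns) * ((clk) / 1000000) / 1000) - 1)
    #define ns2cycle_new(ns, clk) (int)((ns) * ((clk) / 1000000) / 1000)

    int main(void)
    {
            unsigned long clk = 100000000;  /* 100 MHz: 10 ns per cycle */
            int ns = 25;                    /* e.g. a 25 ns datasheet minimum */

            /* old: (25*100/1000)-1 = 1 -> 2 cycles = 20 ns (too short)
             * new:  25*100/1000    = 2 -> 3 cycles = 30 ns (safe)      */
            printf("old=%d new=%d\n",
                   ns2cycle_old(ns, clk), ns2cycle_new(ns, clk));
            return 0;
    }

Truncation plus the hardware's implicit extra cycle now always lands at or above the requested time, erring on the long (safe) side instead.
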
diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug
index 2246f154e2f7..61f6e5e40458 100644
--- a/drivers/mtd/ubi/Kconfig.debug
+++ b/drivers/mtd/ubi/Kconfig.debug
@@ -6,7 +6,7 @@ config MTD_UBI_DEBUG
6 depends on SYSFS 6 depends on SYSFS
7 depends on MTD_UBI 7 depends on MTD_UBI
8 select DEBUG_FS 8 select DEBUG_FS
9 select KALLSYMS_ALL 9 select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL
10 help 10 help
11 This option enables UBI debugging. 11 This option enables UBI debugging.
12 12
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 4dfa6b90c21c..3d2d1a69e9a0 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -798,18 +798,18 @@ static int rename_volumes(struct ubi_device *ubi,
798 goto out_free; 798 goto out_free;
799 } 799 }
800 800
801 re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 801 re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
802 if (!re) { 802 if (!re1) {
803 err = -ENOMEM; 803 err = -ENOMEM;
804 ubi_close_volume(desc); 804 ubi_close_volume(desc);
805 goto out_free; 805 goto out_free;
806 } 806 }
807 807
808 re->remove = 1; 808 re1->remove = 1;
809 re->desc = desc; 809 re1->desc = desc;
810 list_add(&re->list, &rename_list); 810 list_add(&re1->list, &rename_list);
811 dbg_msg("will remove volume %d, name \"%s\"", 811 dbg_msg("will remove volume %d, name \"%s\"",
812 re->desc->vol->vol_id, re->desc->vol->name); 812 re1->desc->vol->vol_id, re1->desc->vol->name);
813 } 813 }
814 814
815 mutex_lock(&ubi->device_mutex); 815 mutex_lock(&ubi->device_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 372a15ac9995..69b52e9c9489 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -843,7 +843,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
843 case UBI_COMPAT_DELETE: 843 case UBI_COMPAT_DELETE:
844 ubi_msg("\"delete\" compatible internal volume %d:%d" 844 ubi_msg("\"delete\" compatible internal volume %d:%d"
845 " found, will remove it", vol_id, lnum); 845 " found, will remove it", vol_id, lnum);
846 err = add_to_list(si, pnum, ec, &si->corr); 846 err = add_to_list(si, pnum, ec, &si->erase);
847 if (err) 847 if (err)
848 return err; 848 return err;
849 return 0; 849 return 0;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ee7b1d8fbb92..97a435672eaf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1212,7 +1212,8 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1212retry: 1212retry:
1213 spin_lock(&ubi->wl_lock); 1213 spin_lock(&ubi->wl_lock);
1214 e = ubi->lookuptbl[pnum]; 1214 e = ubi->lookuptbl[pnum];
1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) { 1215 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1216 in_wl_tree(e, &ubi->erroneous)) {
1216 spin_unlock(&ubi->wl_lock); 1217 spin_unlock(&ubi->wl_lock);
1217 return 0; 1218 return 0;
1218 } 1219 }
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 7a01588fb6fb..e31a6d1919c6 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -633,7 +633,8 @@ struct vortex_private {
633 open:1, 633 open:1,
634 medialock:1, 634 medialock:1,
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ 635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1; /* accept large frames */ 636 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */
637 int drv_flags; 638 int drv_flags;
638 u16 status_enable; 639 u16 status_enable;
639 u16 intr_enable; 640 u16 intr_enable;
@@ -646,7 +647,7 @@ struct vortex_private {
646 u16 io_size; /* Size of PCI region (for release_region) */ 647 u16 io_size; /* Size of PCI region (for release_region) */
647 648
648 /* Serialises access to hardware other than MII and variables below. 649 /* Serialises access to hardware other than MII and variables below.
649 * The lock hierarchy is rtnl_lock > lock > mii_lock > window_lock. */ 650 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
650 spinlock_t lock; 651 spinlock_t lock;
651 652
652 spinlock_t mii_lock; /* Serialises access to MII */ 653 spinlock_t mii_lock; /* Serialises access to MII */
@@ -1993,10 +1994,9 @@ vortex_error(struct net_device *dev, int status)
1993 } 1994 }
1994 } 1995 }
1995 1996
1996 if (status & RxEarly) { /* Rx early is unused. */ 1997 if (status & RxEarly) /* Rx early is unused. */
1997 vortex_rx(dev);
1998 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD); 1998 iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
1999 } 1999
2000 if (status & StatsFull) { /* Empty statistics. */ 2000 if (status & StatsFull) { /* Empty statistics. */
2001 static int DoneDidThat; 2001 static int DoneDidThat;
2002 if (vortex_debug > 4) 2002 if (vortex_debug > 4)
@@ -2133,6 +2133,15 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2133 dev->name, vp->cur_tx); 2133 dev->name, vp->cur_tx);
2134 } 2134 }
2135 2135
2136 /*
2137 * We can't allow a recursion from our interrupt handler back into the
2138 * tx routine, as they take the same spin lock, and that causes
2139 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
2140 * a bit
2141 */
2142 if (vp->handling_irq)
2143 return NETDEV_TX_BUSY;
2144
2136 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { 2145 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2137 if (vortex_debug > 0) 2146 if (vortex_debug > 0)
2138 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", 2147 pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
@@ -2288,7 +2297,12 @@ vortex_interrupt(int irq, void *dev_id)
2288 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) { 2297 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2289 if (status == 0xffff) 2298 if (status == 0xffff)
2290 break; 2299 break;
2300 if (status & RxEarly)
2301 vortex_rx(dev);
2302 spin_unlock(&vp->window_lock);
2291 vortex_error(dev, status); 2303 vortex_error(dev, status);
2304 spin_lock(&vp->window_lock);
2305 window_set(vp, 7);
2292 } 2306 }
2293 2307
2294 if (--work_done < 0) { 2308 if (--work_done < 0) {
@@ -2335,11 +2349,13 @@ boomerang_interrupt(int irq, void *dev_id)
2335 2349
2336 ioaddr = vp->ioaddr; 2350 ioaddr = vp->ioaddr;
2337 2351
2352
2338 /* 2353 /*
2339 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout 2354 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2340 * and boomerang_start_xmit 2355 * and boomerang_start_xmit
2341 */ 2356 */
2342 spin_lock(&vp->lock); 2357 spin_lock(&vp->lock);
2358 vp->handling_irq = 1;
2343 2359
2344 status = ioread16(ioaddr + EL3_STATUS); 2360 status = ioread16(ioaddr + EL3_STATUS);
2345 2361
@@ -2447,6 +2463,7 @@ boomerang_interrupt(int irq, void *dev_id)
2447 pr_debug("%s: exiting interrupt, status %4.4x.\n", 2463 pr_debug("%s: exiting interrupt, status %4.4x.\n",
2448 dev->name, status); 2464 dev->name, status);
2449handler_exit: 2465handler_exit:
2466 vp->handling_irq = 0;
2450 spin_unlock(&vp->lock); 2467 spin_unlock(&vp->lock);
2451 return IRQ_HANDLED; 2468 return IRQ_HANDLED;
2452} 2469}
@@ -2971,7 +2988,6 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2971{ 2988{
2972 int err; 2989 int err;
2973 struct vortex_private *vp = netdev_priv(dev); 2990 struct vortex_private *vp = netdev_priv(dev);
2974 unsigned long flags;
2975 pci_power_t state = 0; 2991 pci_power_t state = 0;
2976 2992
2977 if(VORTEX_PCI(vp)) 2993 if(VORTEX_PCI(vp))
@@ -2981,9 +2997,7 @@ static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2981 2997
2982 if(state != 0) 2998 if(state != 0)
2983 pci_set_power_state(VORTEX_PCI(vp), PCI_D0); 2999 pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
2984 spin_lock_irqsave(&vp->lock, flags);
2985 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL); 3000 err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
2986 spin_unlock_irqrestore(&vp->lock, flags);
2987 if(state != 0) 3001 if(state != 0)
2988 pci_set_power_state(VORTEX_PCI(vp), state); 3002 pci_set_power_state(VORTEX_PCI(vp), state);
2989 3003
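
[Note] The boomerang path gains an explicit reentrancy guard: the interrupt handler and start_xmit share vp->lock, and handling_irq is set for the whole handler, so a transmit attempted from under the handler returns NETDEV_TX_BUSY and lets the core requeue the skb instead of self-deadlocking. The vortex path instead hoists vortex_rx() out of vortex_error() and drops window_lock around the error handling. The guard, reduced to its shape (demo_priv stands in for struct vortex_private):

    struct demo_priv {
            spinlock_t lock;
            unsigned int handling_irq:1;    /* set around the IRQ handler */
    };

    static netdev_tx_t demo_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
    {
            struct demo_priv *vp = netdev_priv(dev);

            if (vp->handling_irq)           /* IRQ handler owns vp->lock */
                    return NETDEV_TX_BUSY;  /* stack retries shortly */

            spin_lock_irq(&vp->lock);
            /* ... post skb to the tx ring ... */
            spin_unlock_irq(&vp->lock);
            return NETDEV_TX_OK;
    }
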
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 7342308718a0..8e7c8a8e61c7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -848,6 +848,15 @@ static int b44_poll(struct napi_struct *napi, int budget)
848 b44_tx(bp); 848 b44_tx(bp);
849 /* spin_unlock(&bp->tx_lock); */ 849 /* spin_unlock(&bp->tx_lock); */
850 } 850 }
851 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
852 bp->istat &= ~ISTAT_RFO;
853 b44_disable_ints(bp);
854 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
855 b44_init_rings(bp);
856 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
857 netif_wake_queue(bp->dev);
858 }
859
851 spin_unlock_irqrestore(&bp->lock, flags); 860 spin_unlock_irqrestore(&bp->lock, flags);
852 861
853 work_done = 0; 862 work_done = 0;
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index d1fb7928ffc5..4faf6961dcec 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -181,6 +181,7 @@ struct be_drvr_stats {
181 u64 be_rx_bytes_prev; 181 u64 be_rx_bytes_prev;
182 u64 be_rx_pkts; 182 u64 be_rx_pkts;
183 u32 be_rx_rate; 183 u32 be_rx_rate;
184 u32 be_rx_mcast_pkt;
184 /* number of non ether type II frames dropped where 185 /* number of non ether type II frames dropped where
185 * frame len > length field of Mac Hdr */ 186 * frame len > length field of Mac Hdr */
186 u32 be_802_3_dropped_frames; 187 u32 be_802_3_dropped_frames;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3d305494a606..34abcc9403d6 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -140,10 +140,8 @@ int be_process_mcc(struct be_adapter *adapter, int *status)
140 while ((compl = be_mcc_compl_get(adapter))) { 140 while ((compl = be_mcc_compl_get(adapter))) {
141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 141 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
142 /* Interpret flags as an async trailer */ 142 /* Interpret flags as an async trailer */
143 BUG_ON(!is_link_state_evt(compl->flags)); 143 if (is_link_state_evt(compl->flags))
144 144 be_async_link_state_process(adapter,
145 /* Interpret compl as a async link evt */
146 be_async_link_state_process(adapter,
147 (struct be_async_event_link_state *) compl); 145 (struct be_async_event_link_state *) compl);
148 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 146 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
149 *status = be_mcc_compl_process(adapter, compl); 147 *status = be_mcc_compl_process(adapter, compl);
@@ -207,7 +205,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
207 205
208 if (msecs > 4000) { 206 if (msecs > 4000) {
209 dev_err(&adapter->pdev->dev, "mbox poll timed out\n"); 207 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
210 be_dump_ue(adapter); 208 be_detect_dump_ue(adapter);
211 return -1; 209 return -1;
212 } 210 }
213 211
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index bdc10a28cfda..ad1e6fac60c5 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -992,5 +992,5 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
992extern int be_cmd_get_phy_info(struct be_adapter *adapter, 992extern int be_cmd_get_phy_info(struct be_adapter *adapter,
993 struct be_dma_mem *cmd); 993 struct be_dma_mem *cmd);
994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 994extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
995extern void be_dump_ue(struct be_adapter *adapter); 995extern void be_detect_dump_ue(struct be_adapter *adapter);
996 996
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9bedb9ff6e12..d92063420c25 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -60,6 +60,7 @@ static const struct be_ethtool_stat et_stats[] = {
60 {DRVSTAT_INFO(be_rx_events)}, 60 {DRVSTAT_INFO(be_rx_events)},
61 {DRVSTAT_INFO(be_tx_compl)}, 61 {DRVSTAT_INFO(be_tx_compl)},
62 {DRVSTAT_INFO(be_rx_compl)}, 62 {DRVSTAT_INFO(be_rx_compl)},
63 {DRVSTAT_INFO(be_rx_mcast_pkt)},
63 {DRVSTAT_INFO(be_ethrx_post_fail)}, 64 {DRVSTAT_INFO(be_ethrx_post_fail)},
64 {DRVSTAT_INFO(be_802_3_dropped_frames)}, 65 {DRVSTAT_INFO(be_802_3_dropped_frames)},
65 {DRVSTAT_INFO(be_802_3_malformed_frames)}, 66 {DRVSTAT_INFO(be_802_3_malformed_frames)},
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 5d38046402b2..a2ec5df0d733 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -167,8 +167,11 @@
167#define FLASH_FCoE_BIOS_START_g3 (13631488) 167#define FLASH_FCoE_BIOS_START_g3 (13631488)
168#define FLASH_REDBOOT_START_g3 (262144) 168#define FLASH_REDBOOT_START_g3 (262144)
169 169
170 170/************* Rx Packet Type Encoding **************/
171 171#define BE_UNICAST_PACKET 0
172#define BE_MULTICAST_PACKET 1
173#define BE_BROADCAST_PACKET 2
174#define BE_RSVD_PACKET 3
172 175
173/* 176/*
174 * BE descriptors: host memory data structures whose formats 177 * BE descriptors: host memory data structures whose formats
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index dcee7a20c0b9..43a3a574e2e0 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -247,6 +247,7 @@ void netdev_stats_update(struct be_adapter *adapter)
247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts; 247 dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes; 248 dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes; 249 dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
250 dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;
250 251
251 /* bad pkts received */ 252 /* bad pkts received */
252 dev_stats->rx_errors = port_stats->rx_crc_errors + 253 dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -294,7 +295,6 @@ void netdev_stats_update(struct be_adapter *adapter)
294 /* no space available in linux */ 295 /* no space available in linux */
295 dev_stats->tx_dropped = 0; 296 dev_stats->tx_dropped = 0;
296 297
297 dev_stats->multicast = port_stats->rx_multicast_frames;
298 dev_stats->collisions = 0; 298 dev_stats->collisions = 0;
299 299
300 /* detailed tx_errors */ 300 /* detailed tx_errors */
@@ -843,7 +843,7 @@ static void be_rx_rate_update(struct be_adapter *adapter)
843} 843}
844 844
845static void be_rx_stats_update(struct be_adapter *adapter, 845static void be_rx_stats_update(struct be_adapter *adapter,
846 u32 pktsize, u16 numfrags) 846 u32 pktsize, u16 numfrags, u8 pkt_type)
847{ 847{
848 struct be_drvr_stats *stats = drvr_stats(adapter); 848 struct be_drvr_stats *stats = drvr_stats(adapter);
849 849
@@ -851,6 +851,9 @@ static void be_rx_stats_update(struct be_adapter *adapter,
851 stats->be_rx_frags += numfrags; 851 stats->be_rx_frags += numfrags;
852 stats->be_rx_bytes += pktsize; 852 stats->be_rx_bytes += pktsize;
853 stats->be_rx_pkts++; 853 stats->be_rx_pkts++;
854
855 if (pkt_type == BE_MULTICAST_PACKET)
856 stats->be_rx_mcast_pkt++;
854} 857}
855 858
856static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso) 859static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -920,9 +923,11 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
920 u16 rxq_idx, i, j; 923 u16 rxq_idx, i, j;
921 u32 pktsize, hdr_len, curr_frag_len, size; 924 u32 pktsize, hdr_len, curr_frag_len, size;
922 u8 *start; 925 u8 *start;
926 u8 pkt_type;
923 927
924 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 928 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
925 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 929 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
930 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
926 931
927 page_info = get_rx_page_info(adapter, rxq_idx); 932 page_info = get_rx_page_info(adapter, rxq_idx);
928 933
@@ -988,7 +993,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
988 BUG_ON(j > MAX_SKB_FRAGS); 993 BUG_ON(j > MAX_SKB_FRAGS);
989 994
990done: 995done:
991 be_rx_stats_update(adapter, pktsize, num_rcvd); 996 be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
992} 997}
993 998
994/* Process the RX completion indicated by rxcp when GRO is disabled */ 999/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -1055,6 +1060,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1055 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; 1060 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1056 u16 i, rxq_idx = 0, vid, j; 1061 u16 i, rxq_idx = 0, vid, j;
1057 u8 vtm; 1062 u8 vtm;
1063 u8 pkt_type;
1058 1064
1059 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1065 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1060 /* Is it a flush compl that has no data */ 1066 /* Is it a flush compl that has no data */
@@ -1065,6 +1071,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1065 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1071 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1066 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 1072 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1067 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 1073 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1074 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1068 1075
1069 /* vlanf could be wrongly set in some cards. 1076 /* vlanf could be wrongly set in some cards.
1070 * ignore if vtm is not set */ 1077 * ignore if vtm is not set */
@@ -1120,7 +1127,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1120 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); 1127 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1121 } 1128 }
1122 1129
1123 be_rx_stats_update(adapter, pkt_size, num_rcvd); 1130 be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
1124} 1131}
1125 1132
1126static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 1133static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -1738,26 +1745,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1738 return 1; 1745 return 1;
1739} 1746}
1740 1747
1741static inline bool be_detect_ue(struct be_adapter *adapter) 1748void be_detect_dump_ue(struct be_adapter *adapter)
1742{
1743 u32 online0 = 0, online1 = 0;
1744
1745 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1746
1747 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1748
1749 if (!online0 || !online1) {
1750 adapter->ue_detected = true;
1751 dev_err(&adapter->pdev->dev,
1752 "UE Detected!! online0=%d online1=%d\n",
1753 online0, online1);
1754 return true;
1755 }
1756
1757 return false;
1758}
1759
1760void be_dump_ue(struct be_adapter *adapter)
1761{ 1749{
1762 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask; 1750 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1763 u32 i; 1751 u32 i;
@@ -1774,6 +1762,11 @@ void be_dump_ue(struct be_adapter *adapter)
1774 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask)); 1762 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1775 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask)); 1763 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1776 1764
1765 if (ue_status_lo || ue_status_hi) {
1766 adapter->ue_detected = true;
1767 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1768 }
1769
1777 if (ue_status_lo) { 1770 if (ue_status_lo) {
1778 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) { 1771 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1779 if (ue_status_lo & 1) 1772 if (ue_status_lo & 1)
@@ -1809,10 +1802,8 @@ static void be_worker(struct work_struct *work)
1809 adapter->rx_post_starved = false; 1802 adapter->rx_post_starved = false;
1810 be_post_rx_frags(adapter); 1803 be_post_rx_frags(adapter);
1811 } 1804 }
1812 if (!adapter->ue_detected) { 1805 if (!adapter->ue_detected)
1813 if (be_detect_ue(adapter)) 1806 be_detect_dump_ue(adapter);
1814 be_dump_ue(adapter);
1815 }
1816 1807
1817 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1808 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1818} 1809}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2cc4cfc31892..3b16f62d5606 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2797,9 +2797,15 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2797 * so it can wait 2797 * so it can wait
2798 */ 2798 */
2799 bond_for_each_slave(bond, slave, i) { 2799 bond_for_each_slave(bond, slave, i) {
2800 unsigned long trans_start = dev_trans_start(slave->dev);
2801
2800 if (slave->link != BOND_LINK_UP) { 2802 if (slave->link != BOND_LINK_UP) {
2801 if (time_before_eq(jiffies, dev_trans_start(slave->dev) + delta_in_ticks) && 2803 if (time_in_range(jiffies,
2802 time_before_eq(jiffies, slave->dev->last_rx + delta_in_ticks)) { 2804 trans_start - delta_in_ticks,
2805 trans_start + delta_in_ticks) &&
2806 time_in_range(jiffies,
2807 slave->dev->last_rx - delta_in_ticks,
2808 slave->dev->last_rx + delta_in_ticks)) {
2803 2809
2804 slave->link = BOND_LINK_UP; 2810 slave->link = BOND_LINK_UP;
2805 slave->state = BOND_STATE_ACTIVE; 2811 slave->state = BOND_STATE_ACTIVE;
@@ -2827,8 +2833,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
2827 * when the source ip is 0, so don't take the link down 2833 * when the source ip is 0, so don't take the link down
2828 * if we don't know our ip yet 2834 * if we don't know our ip yet
2829 */ 2835 */
2830 if (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2*delta_in_ticks) || 2836 if (!time_in_range(jiffies,
2831 (time_after_eq(jiffies, slave->dev->last_rx + 2*delta_in_ticks))) { 2837 trans_start - delta_in_ticks,
2838 trans_start + 2 * delta_in_ticks) ||
2839 !time_in_range(jiffies,
2840 slave->dev->last_rx - delta_in_ticks,
2841 slave->dev->last_rx + 2 * delta_in_ticks)) {
2832 2842
2833 slave->link = BOND_LINK_DOWN; 2843 slave->link = BOND_LINK_DOWN;
2834 slave->state = BOND_STATE_BACKUP; 2844 slave->state = BOND_STATE_BACKUP;
@@ -2883,13 +2893,16 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2883{ 2893{
2884 struct slave *slave; 2894 struct slave *slave;
2885 int i, commit = 0; 2895 int i, commit = 0;
2896 unsigned long trans_start;
2886 2897
2887 bond_for_each_slave(bond, slave, i) { 2898 bond_for_each_slave(bond, slave, i) {
2888 slave->new_link = BOND_LINK_NOCHANGE; 2899 slave->new_link = BOND_LINK_NOCHANGE;
2889 2900
2890 if (slave->link != BOND_LINK_UP) { 2901 if (slave->link != BOND_LINK_UP) {
2891 if (time_before_eq(jiffies, slave_last_rx(bond, slave) + 2902 if (time_in_range(jiffies,
2892 delta_in_ticks)) { 2903 slave_last_rx(bond, slave) - delta_in_ticks,
2904 slave_last_rx(bond, slave) + delta_in_ticks)) {
2905
2893 slave->new_link = BOND_LINK_UP; 2906 slave->new_link = BOND_LINK_UP;
2894 commit++; 2907 commit++;
2895 } 2908 }
@@ -2902,8 +2915,9 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2902 * active. This avoids bouncing, as the last receive 2915 * active. This avoids bouncing, as the last receive
2903 * times need a full ARP monitor cycle to be updated. 2916 * times need a full ARP monitor cycle to be updated.
2904 */ 2917 */
2905 if (!time_after_eq(jiffies, slave->jiffies + 2918 if (time_in_range(jiffies,
2906 2 * delta_in_ticks)) 2919 slave->jiffies - delta_in_ticks,
2920 slave->jiffies + 2 * delta_in_ticks))
2907 continue; 2921 continue;
2908 2922
2909 /* 2923 /*
@@ -2921,8 +2935,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2921 */ 2935 */
2922 if (slave->state == BOND_STATE_BACKUP && 2936 if (slave->state == BOND_STATE_BACKUP &&
2923 !bond->current_arp_slave && 2937 !bond->current_arp_slave &&
2924 time_after(jiffies, slave_last_rx(bond, slave) + 2938 !time_in_range(jiffies,
2925 3 * delta_in_ticks)) { 2939 slave_last_rx(bond, slave) - delta_in_ticks,
2940 slave_last_rx(bond, slave) + 3 * delta_in_ticks)) {
2941
2926 slave->new_link = BOND_LINK_DOWN; 2942 slave->new_link = BOND_LINK_DOWN;
2927 commit++; 2943 commit++;
2928 } 2944 }
@@ -2933,11 +2949,15 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
2933 * - (more than 2*delta since receive AND 2949 * - (more than 2*delta since receive AND
2934 * the bond has an IP address) 2950 * the bond has an IP address)
2935 */ 2951 */
2952 trans_start = dev_trans_start(slave->dev);
2936 if ((slave->state == BOND_STATE_ACTIVE) && 2953 if ((slave->state == BOND_STATE_ACTIVE) &&
2937 (time_after_eq(jiffies, dev_trans_start(slave->dev) + 2954 (!time_in_range(jiffies,
2938 2 * delta_in_ticks) || 2955 trans_start - delta_in_ticks,
2939 (time_after_eq(jiffies, slave_last_rx(bond, slave) 2956 trans_start + 2 * delta_in_ticks) ||
2940 + 2 * delta_in_ticks)))) { 2957 !time_in_range(jiffies,
2958 slave_last_rx(bond, slave) - delta_in_ticks,
2959 slave_last_rx(bond, slave) + 2 * delta_in_ticks))) {
2960
2941 slave->new_link = BOND_LINK_DOWN; 2961 slave->new_link = BOND_LINK_DOWN;
2942 commit++; 2962 commit++;
2943 } 2963 }
@@ -2956,6 +2976,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2956{ 2976{
2957 struct slave *slave; 2977 struct slave *slave;
2958 int i; 2978 int i;
2979 unsigned long trans_start;
2959 2980
2960 bond_for_each_slave(bond, slave, i) { 2981 bond_for_each_slave(bond, slave, i) {
2961 switch (slave->new_link) { 2982 switch (slave->new_link) {
@@ -2963,10 +2984,11 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
2963 continue; 2984 continue;
2964 2985
2965 case BOND_LINK_UP: 2986 case BOND_LINK_UP:
2987 trans_start = dev_trans_start(slave->dev);
2966 if ((!bond->curr_active_slave && 2988 if ((!bond->curr_active_slave &&
2967 time_before_eq(jiffies, 2989 time_in_range(jiffies,
2968 dev_trans_start(slave->dev) + 2990 trans_start - delta_in_ticks,
2969 delta_in_ticks)) || 2991 trans_start + delta_in_ticks)) ||
2970 bond->curr_active_slave != slave) { 2992 bond->curr_active_slave != slave) {
2971 slave->link = BOND_LINK_UP; 2993 slave->link = BOND_LINK_UP;
2972 bond->current_arp_slave = NULL; 2994 bond->current_arp_slave = NULL;
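
[Note] Every ARP-monitor check switches from one-sided time_before_eq()/time_after_eq() comparisons to time_in_range(now, lo, hi). The window form is wrap-safe: a timestamp far in the past (dev_trans_start() on a slave that has not transmitted for ages, or a stale last_rx) stays out of range, whereas a single time_before_eq() flips back to true once jiffies drifts more than half its range away from the stamp. Shape of the up-check (a fragment; time_in_range() comes from <linux/jiffies.h> and is true iff lo <= now <= hi in wrap-around arithmetic):

    unsigned long trans_start = dev_trans_start(slave->dev);

    if (time_in_range(jiffies,
                      trans_start - delta_in_ticks,
                      trans_start + delta_in_ticks)) {
            /* the slave transmitted within one monitor interval of now */
    }
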
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 631a6242b011..75bfc3a9d95f 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -15,7 +15,7 @@ config CAIF_TTY
15 15
16config CAIF_SPI_SLAVE 16config CAIF_SPI_SLAVE
17 tristate "CAIF SPI transport driver for slave interface" 17 tristate "CAIF SPI transport driver for slave interface"
18 depends on CAIF 18 depends on CAIF && HAS_DMA
19 default n 19 default n
20 ---help--- 20 ---help---
21 The CAIF Link layer SPI Protocol driver for Slave SPI interface. 21 The CAIF Link layer SPI Protocol driver for Slave SPI interface.
diff --git a/drivers/net/ibm_newemac/debug.c b/drivers/net/ibm_newemac/debug.c
index 3995fafc1e08..8c6c1e2a8750 100644
--- a/drivers/net/ibm_newemac/debug.c
+++ b/drivers/net/ibm_newemac/debug.c
@@ -238,7 +238,7 @@ void emac_dbg_dump_all(void)
238} 238}
239 239
240#if defined(CONFIG_MAGIC_SYSRQ) 240#if defined(CONFIG_MAGIC_SYSRQ)
241static void emac_sysrq_handler(int key, struct tty_struct *tty) 241static void emac_sysrq_handler(int key)
242{ 242{
243 emac_dbg_dump_all(); 243 emac_dbg_dump_all();
244} 244}
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index b4fb07a6f13f..51919fcd50c2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -503,30 +503,33 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
503 ks8851_wrreg16(ks, KS_RXQCR, 503 ks8851_wrreg16(ks, KS_RXQCR,
504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE); 504 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
505 505
506 if (rxlen > 0) { 506 if (rxlen > 4) {
507 skb = netdev_alloc_skb(ks->netdev, rxlen + 2 + 8); 507 unsigned int rxalign;
508 if (!skb) { 508
509 /* todo - dump frame and move on */ 509 rxlen -= 4;
510 } 510 rxalign = ALIGN(rxlen, 4);
511 skb = netdev_alloc_skb_ip_align(ks->netdev, rxalign);
512 if (skb) {
511 513
512 /* two bytes to ensure ip is aligned, and four bytes 514 /* 4 bytes of status header + 4 bytes of
513 * for the status header and 4 bytes of garbage */ 515 * garbage: we put them before ethernet
514 skb_reserve(skb, 2 + 4 + 4); 516 * header, so that they are copied,
517 * but ignored.
518 */
515 519
516 rxpkt = skb_put(skb, rxlen - 4) - 8; 520 rxpkt = skb_put(skb, rxlen) - 8;
517 521
518 /* align the packet length to 4 bytes, and add 4 bytes 522 ks8851_rdfifo(ks, rxpkt, rxalign + 8);
519 * as we're getting the rx status header as well */
520 ks8851_rdfifo(ks, rxpkt, ALIGN(rxlen, 4) + 8);
521 523
522 if (netif_msg_pktdata(ks)) 524 if (netif_msg_pktdata(ks))
523 ks8851_dbg_dumpkkt(ks, rxpkt); 525 ks8851_dbg_dumpkkt(ks, rxpkt);
524 526
525 skb->protocol = eth_type_trans(skb, ks->netdev); 527 skb->protocol = eth_type_trans(skb, ks->netdev);
526 netif_rx(skb); 528 netif_rx(skb);
527 529
528 ks->netdev->stats.rx_packets++; 530 ks->netdev->stats.rx_packets++;
529 ks->netdev->stats.rx_bytes += rxlen - 4; 531 ks->netdev->stats.rx_bytes += rxlen;
532 }
530 } 533 }
531 534
532 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 535 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
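
[Note] The ks8851 receive rewrite fixes two problems at once: the old code ignored netdev_alloc_skb() failure and dereferenced the NULL skb anyway, and it sized and reserved the buffer by hand. Now frames of four bytes or fewer are dropped, the payload is DMA-aligned via ALIGN(rxlen, 4), netdev_alloc_skb_ip_align() supplies the IP alignment, and the 4-byte status header plus 4 bytes of garbage are read into the headroom just before the ethernet header, so one FIFO read of rxalign + 8 bytes fetches everything. Roughly (a fragment; read_rx_fifo stands in for ks8851_rdfifo):

    if (rxlen > 4) {
            unsigned int len = rxlen - 4;           /* per the hunk above */
            unsigned int rxalign = ALIGN(len, 4);
            struct sk_buff *skb = netdev_alloc_skb_ip_align(ndev, rxalign);

            if (skb) {
                    /* "- 8" points into headroom: status + garbage land
                     * in front of the frame and are simply ignored */
                    u8 *rxpkt = skb_put(skb, len) - 8;

                    read_rx_fifo(ks, rxpkt, rxalign + 8);
                    skb->protocol = eth_type_trans(skb, ndev);
                    netif_rx(skb);
                    ndev->stats.rx_packets++;
                    ndev->stats.rx_bytes += len;
            }
    }
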
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index cb30df106a2c..73d314592230 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -2131,9 +2131,16 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
2131#ifdef CONFIG_NET_POLL_CONTROLLER 2131#ifdef CONFIG_NET_POLL_CONTROLLER
2132static void netxen_nic_poll_controller(struct net_device *netdev) 2132static void netxen_nic_poll_controller(struct net_device *netdev)
2133{ 2133{
2134 int ring;
2135 struct nx_host_sds_ring *sds_ring;
2134 struct netxen_adapter *adapter = netdev_priv(netdev); 2136 struct netxen_adapter *adapter = netdev_priv(netdev);
2137 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
2138
2135 disable_irq(adapter->irq); 2139 disable_irq(adapter->irq);
2136 netxen_intr(adapter->irq, adapter); 2140 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2141 sds_ring = &recv_ctx->sds_rings[ring];
2142 netxen_intr(adapter->irq, sds_ring);
2143 }
2137 enable_irq(adapter->irq); 2144 enable_irq(adapter->irq);
2138} 2145}
2139#endif 2146#endif
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1f89f472cbfb..8e1859c801a4 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -7269,32 +7269,28 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
 	struct niu_parent *parent = np->parent;
 	struct niu_tcam_entry *tp;
 	int i, idx, cnt;
-	u16 n_entries;
 	unsigned long flags;
-
+	int ret = 0;
 
 	/* put the tcam size here */
 	nfc->data = tcam_get_size(np);
 
 	niu_lock_parent(np, flags);
-	n_entries = nfc->rule_cnt;
 	for (cnt = 0, i = 0; i < nfc->data; i++) {
 		idx = tcam_get_index(np, i);
 		tp = &parent->tcam[idx];
 		if (!tp->valid)
 			continue;
+		if (cnt == nfc->rule_cnt) {
+			ret = -EMSGSIZE;
+			break;
+		}
 		rule_locs[cnt] = i;
 		cnt++;
 	}
 	niu_unlock_parent(np, flags);
 
-	if (n_entries != cnt) {
-		/* print warning, this should not happen */
-		netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
-			    np->parent->index, __func__, n_entries, cnt);
-	}
-
-	return 0;
+	return ret;
 }
 
 static int niu_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
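
Note: instead of warning after the fact when the caller's rule_cnt underestimates the number of valid TCAM entries, the loop above now stops filling rule_locs at the caller's limit and reports -EMSGSIZE. A standalone sketch of this bounded-enumeration pattern (names are illustrative, not the driver's):

#include <stdio.h>

#define EMSGSIZE 90

/* Copy at most `limit` valid indices; tell the caller if there were more. */
static int collect(const int *valid, int total, int *out, int limit)
{
	int i, cnt = 0;

	for (i = 0; i < total; i++) {
		if (!valid[i])
			continue;
		if (cnt == limit)
			return -EMSGSIZE;	/* caller's buffer is too small */
		out[cnt++] = i;
	}
	return cnt;
}

int main(void)
{
	int valid[6] = { 1, 0, 1, 1, 0, 1 };
	int out[2];

	printf("%d\n", collect(valid, 6, out, 2));	/* prints -90 */
	return 0;
}
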
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 5ad42e0aee25..e180832c278f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1617,6 +1617,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCETTX", 0x547e66dc, 0x6fc5459b),
 	PCMCIA_DEVICE_PROD_ID12("iPort", "10/100 Ethernet Card", 0x56c538d2, 0x11b0ffc0),
 	PCMCIA_DEVICE_PROD_ID12("KANSAI ELECTRIC CO.,LTD", "KLA-PCM/T", 0xb18dc3b4, 0xcc51a956),
+	PCMCIA_DEVICE_PROD_ID12("KENTRONICS", "KEP-230", 0xaf8144c9, 0x868f6616),
 	PCMCIA_DEVICE_PROD_ID12("KCI", "PE520 PCMCIA Ethernet Adapter", 0xa89b87d3, 0x1eb88e64),
 	PCMCIA_DEVICE_PROD_ID12("KINGMAX", "EN10T2T", 0x7bcb459a, 0xa5c81fa5),
 	PCMCIA_DEVICE_PROD_ID12("Kingston", "KNE-PC2", 0x1128e633, 0xce2a89b3),
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0761197c07e..16ddc77313cb 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -466,6 +466,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 
 	phydev->interface = interface;
 
+	phydev->state = PHY_READY;
+
 	/* Do initial configuration here, now that
 	 * we have certain key parameters
 	 * (dev_flags and interface) */
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 764aa9099468..75c2ff99d66d 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -652,15 +652,15 @@ static void eth_port_start(struct net_device *dev)
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = pep->tx_curr_desc_q;
 	wrl(pep, ETH_C_TX_DESC_1,
-	    (u32) ((struct tx_desc *)pep->tx_desc_dma + tx_curr_desc));
+	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = pep->rx_curr_desc_q;
 	wrl(pep, ETH_C_RX_DESC_0,
-	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
 
 	wrl(pep, ETH_F_RX_DESC_0,
-	    (u32) ((struct rx_desc *)pep->rx_desc_dma + rx_curr_desc));
+	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
 
 	/* Clear all interrupts */
 	wrl(pep, INT_CAUSE, 0);
@@ -1347,7 +1347,7 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 	if (pep->phy != NULL)
-		return phy_mii_ioctl(pep->phy, if_mii(ifr), cmd);
+		return phy_mii_ioctl(pep->phy, ifr, cmd);
 
 	return -EOPNOTSUPP;
 }
@@ -1411,10 +1411,8 @@ static int ethernet_phy_setup(struct net_device *dev)
 {
 	struct pxa168_eth_private *pep = netdev_priv(dev);
 
-	if (pep->pd != NULL) {
-		if (pep->pd->init)
-			pep->pd->init();
-	}
+	if (pep->pd->init)
+		pep->pd->init();
 	pep->phy = phy_scan(pep, pep->pd->phy_addr & 0x1f);
 	if (pep->phy != NULL)
 		phy_init(pep, pep->pd->speed, pep->pd->duplex);
@@ -1496,7 +1494,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
 	if (!dev) {
 		err = -ENOMEM;
-		goto out;
+		goto err_clk;
 	}
 
 	platform_set_drvdata(pdev, dev);
@@ -1506,12 +1504,12 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res == NULL) {
 		err = -ENODEV;
-		goto out;
+		goto err_netdev;
 	}
 	pep->base = ioremap(res->start, res->end - res->start + 1);
 	if (pep->base == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto err_netdev;
 	}
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	BUG_ON(!res);
@@ -1548,7 +1546,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	pep->smi_bus = mdiobus_alloc();
 	if (pep->smi_bus == NULL) {
 		err = -ENOMEM;
-		goto out;
+		goto err_base;
 	}
 	pep->smi_bus->priv = pep;
 	pep->smi_bus->name = "pxa168_eth smi";
@@ -1557,31 +1555,31 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
 	pep->smi_bus->parent = &pdev->dev;
 	pep->smi_bus->phy_mask = 0xffffffff;
-	if (mdiobus_register(pep->smi_bus) < 0) {
-		err = -ENOMEM;
-		goto out;
-	}
+	err = mdiobus_register(pep->smi_bus);
+	if (err)
+		goto err_free_mdio;
+
 	pxa168_init_hw(pep);
 	err = ethernet_phy_setup(dev);
 	if (err)
-		goto out;
+		goto err_mdiobus;
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	err = register_netdev(dev);
 	if (err)
-		goto out;
+		goto err_mdiobus;
 	return 0;
-out:
-	if (pep->clk) {
-		clk_disable(pep->clk);
-		clk_put(pep->clk);
-		pep->clk = NULL;
-	}
-	if (pep->base) {
-		iounmap(pep->base);
-		pep->base = NULL;
-	}
-	if (dev)
-		free_netdev(dev);
+
+err_mdiobus:
+	mdiobus_unregister(pep->smi_bus);
+err_free_mdio:
+	mdiobus_free(pep->smi_bus);
+err_base:
+	iounmap(pep->base);
+err_netdev:
+	free_netdev(dev);
+err_clk:
+	clk_disable(clk);
+	clk_put(clk);
 	return err;
 }
 
@@ -1605,6 +1603,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
 
 	iounmap(pep->base);
 	pep->base = NULL;
+	mdiobus_unregister(pep->smi_bus);
+	mdiobus_free(pep->smi_bus);
 	unregister_netdev(dev);
 	flush_scheduled_work();
 	free_netdev(dev);
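
Note: the probe path above is reworked into the usual kernel unwind ladder — each failure jumps to a label that releases everything acquired so far, in reverse order — and the remove path frees the MDIO bus it previously leaked. A compact standalone sketch of the pattern (the resources are hypothetical stand-ins, not the driver's):

#include <stdio.h>

static int get_a(void) { puts("get a"); return 0; }
static int get_b(void) { puts("get b"); return 0; }
static int get_c(void) { puts("get c"); return -1; }	/* simulate a failure */
static void put_a(void) { puts("put a"); }
static void put_b(void) { puts("put b"); }

static int example_probe(void)
{
	int err;

	err = get_a();
	if (err)
		return err;
	err = get_b();
	if (err)
		goto err_a;
	err = get_c();
	if (err)
		goto err_b;
	return 0;

err_b:			/* unwind in reverse order of acquisition */
	put_b();
err_a:
	put_a();
	return err;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}
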
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 5a6445a691b3..5fd2abd1eb67 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2513,9 +2513,16 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void qlcnic_poll_controller(struct net_device *netdev)
 {
+	int ring;
+	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
 	disable_irq(adapter->irq);
-	qlcnic_intr(adapter->irq, adapter);
+	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+		sds_ring = &recv_ctx->sds_rings[ring];
+		qlcnic_intr(adapter->irq, sds_ring);
+	}
 	enable_irq(adapter->irq);
 }
 #endif
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index e2d0e108b9aa..4ffebe83d883 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3926,12 +3926,12 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	for (i = 0; i < qdev->rss_ring_count; i++)
 		netif_napi_del(&qdev->rx_ring[i].napi);
 
-	ql_free_rx_buffers(qdev);
-
 	status = ql_adapter_reset(qdev);
 	if (status)
 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
 			  qdev->func);
+	ql_free_rx_buffers(qdev);
+
 	return status;
 }
 
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 1ccea76d89ae..03c160c6d75c 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1862,15 +1862,15 @@ static int stmmac_resume(struct platform_device *pdev)
 	if (!netif_running(dev))
 		return 0;
 
-	spin_lock(&priv->lock);
-
 	if (priv->shutdown) {
 		/* Re-open the interface and re-init the MAC/DMA
-		   and the rings. */
+		   and the rings (i.e. on hibernation stage) */
 		stmmac_open(dev);
-		goto out_resume;
+		return 0;
 	}
 
+	spin_lock(&priv->lock);
+
 	/* Power Down bit, into the PM register, is cleared
 	 * automatically as soon as a magic packet or a Wake-up frame
 	 * is received. Anyway, it's better to manually clear
@@ -1898,7 +1898,6 @@ static int stmmac_resume(struct platform_device *pdev)
 
 	netif_start_queue(dev);
 
-out_resume:
 	spin_unlock(&priv->lock);
 	return 0;
 }
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 08e7b6abacdd..b2bcf99e6f08 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -58,6 +58,7 @@
 #define USB_PRODUCT_IPHONE      0x1290
 #define USB_PRODUCT_IPHONE_3G   0x1292
 #define USB_PRODUCT_IPHONE_3GS  0x1294
+#define USB_PRODUCT_IPHONE_4    0x1297
 
 #define IPHETH_USBINTF_CLASS    255
 #define IPHETH_USBINTF_SUBCLASS 253
@@ -92,6 +93,10 @@ static struct usb_device_id ipheth_table[] = {
 		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
 		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
 		IPHETH_USBINTF_PROTO) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(
+		USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
+		IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+		IPHETH_USBINTF_PROTO) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -424,10 +429,6 @@ static const struct net_device_ops ipheth_netdev_ops = {
 	.ndo_get_stats = &ipheth_stats,
 };
 
-static struct device_type ipheth_type = {
-	.name	= "wwan",
-};
-
 static int ipheth_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
@@ -445,7 +446,7 @@ static int ipheth_probe(struct usb_interface *intf,
 
 	netdev->netdev_ops = &ipheth_netdev_ops;
 	netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
-	strcpy(netdev->name, "wwan%d");
+	strcpy(netdev->name, "eth%d");
 
 	dev = netdev_priv(netdev);
 	dev->udev = udev;
@@ -495,7 +496,6 @@ static int ipheth_probe(struct usb_interface *intf,
 
 	SET_NETDEV_DEV(netdev, &intf->dev);
 	SET_ETHTOOL_OPS(netdev, &ops);
-	SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
 
 	retval = register_netdev(netdev);
 	if (retval) {
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index ed7f4f5c4062..6884813b809c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2824,7 +2824,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
 
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
-		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
+		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
 
 	ret = register_netdev(dev);
 	if (ret < 0)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 45789c8990d3..116ac66c6e3e 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1327,6 +1327,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
 			PCI_DMA_TODEVICE);
 
 	rate = ieee80211_get_tx_rate(sc->hw, info);
+	if (!rate) {
+		ret = -EINVAL;
+		goto err_unmap;
+	}
 
 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
 		flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index b883b174385b..057fb69ddf7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -797,7 +797,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
 			length = block[it+1];
 			length &= 0xff;
 
-			if (length > 0 && spot >= 0 && spot+length < mdataSize) {
+			if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
 				ath_print(common, ATH_DBG_EEPROM,
 					  "Restore at %d: spot=%d "
 					  "offset=%d length=%d\n",
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 7f48df1e2903..0b09db0f8e7d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -62,7 +62,7 @@
 
 #define SD_NO_CTL               0xE0
 #define NO_CTL                  0xff
-#define CTL_MODE_M              7
+#define CTL_MODE_M              0xf
 #define CTL_11A                 0
 #define CTL_11B                 1
 #define CTL_11G                 2
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index a1c39526161a..345dd9721b41 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
 #define NO_CTL				0xff
 #define SD_NO_CTL			0xE0
 #define NO_CTL				0xff
-#define CTL_MODE_M			7
 #define CTL_11A				0
 #define CTL_11B				1
 #define CTL_11G				2
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 1bbaaa44d981..296fd00a5129 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -124,7 +124,7 @@ struct if_sdio_card {
 	bool helper_allocated;
 	bool firmware_allocated;
 
-	u8 buffer[65536];
+	u8 buffer[65536] __attribute__((aligned(4)));
 
 	spinlock_t lock;
 	struct if_sdio_packet *packets;
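
Note: a u8 array member only inherits 1-byte alignment from its element type, so the SDIO transfer buffer above could otherwise land at any offset; the attribute pins it to a 32-bit boundary. A standalone sketch of the same annotation:

#include <stdint.h>
#include <stdio.h>

struct card {
	char flag;	/* without the attribute, buffer could start at offset 1 */
	uint8_t buffer[64] __attribute__((aligned(4)));
};

int main(void)
{
	struct card c;

	printf("offset mod 4 = %lu\n",
	       (unsigned long)((uintptr_t)c.buffer & 3));	/* prints 0 */
	return 0;
}
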
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 8f67db487f57..76b2318a7dc7 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -446,7 +446,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
 	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
-	     (!payload->status))
+	    !(payload->status & P54_TX_FAILED))
 		info->flags |= IEEE80211_TX_STAT_ACK;
 	if (payload->status & P54_TX_PSM_CANCELLED)
 		info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 45fcc1e96df9..3bc72d18b121 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -338,9 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 	acpi_handle chandle, handle;
 	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-	flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-		  OSC_SHPC_NATIVE_HP_CONTROL |
-		  OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+	flags &= OSC_SHPC_NATIVE_HP_CONTROL;
 	if (!flags) {
 		err("Invalid flags %u specified!\n", flags);
 		return -EINVAL;
@@ -360,7 +358,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 		acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
 		dbg("Trying to get hotplug control for %s\n",
 		    (char *)string.pointer);
-		status = acpi_pci_osc_control_set(handle, flags);
+		status = acpi_pci_osc_control_set(handle, &flags, flags);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
 		if (status == AE_SUPPORT)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4ed76b47b6dc..73d513989263 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -176,19 +176,11 @@ static inline void pciehp_firmware_init(void)
 {
 	pciehp_acpi_slot_detection_init();
 }
-
-static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
-{
-	int retval;
-	u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
-		     OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	retval = acpi_get_hp_hw_control_from_firmware(dev, flags);
-	if (retval)
-		return retval;
-	return pciehp_acpi_slot_detection_check(dev);
-}
 #else
 #define pciehp_firmware_init()				do {} while (0)
-#define pciehp_get_hp_hw_control_from_firmware(dev)	0
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+	return 0;
+}
 #endif 				/* CONFIG_ACPI */
 #endif				/* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 1f4000a5a108..2574700db461 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -85,9 +85,7 @@ static int __init dummy_probe(struct pcie_device *dev)
 	acpi_handle handle;
 	struct dummy_slot *slot, *tmp;
 	struct pci_dev *pdev = dev->port;
-	/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
-	if (pciehp_get_hp_hw_control_from_firmware(pdev))
-		return -ENODEV;
+
 	pos = pci_pcie_cap(pdev);
 	if (!pos)
 		return -ENODEV;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3588ea61b0dd..aa5f3ff629ff 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -59,7 +59,7 @@ module_param(pciehp_force, bool, 0644);
 MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
 MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
-MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
 
 #define PCIE_MODULE_NAME "pciehp"
 
@@ -235,7 +235,7 @@ static int pciehp_probe(struct pcie_device *dev)
 		dev_info(&dev->device,
 			 "Bypassing BIOS check for pciehp use on %s\n",
 			 pci_name(dev->port));
-	else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
+	else if (pciehp_acpi_slot_detection_check(dev->port))
 		goto err_out_none;
 
 	ctrl = pcie_init(dev);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 679c39de6a89..7754a678ab15 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -140,8 +140,10 @@ static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
 
 #ifdef CONFIG_PCIEAER
 void pci_no_aer(void);
+bool pci_aer_available(void);
 #else
 static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
 #endif
 
 static inline int pci_no_d1d2(struct pci_dev *dev)
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index ea654545e7c4..00c62df5a9fc 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -6,10 +6,11 @@
 obj-$(CONFIG_PCIEASPM)		+= aspm.o
 
 pcieportdrv-y			:= portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI)	+= portdrv_acpi.o
 
 obj-$(CONFIG_PCIEPORTBUS)	+= pcieportdrv.o
 
 # Build PCI Express AER if needed
 obj-$(CONFIG_PCIEAER)		+= aer/
 
-obj-$(CONFIG_PCIE_PME) += pme/
+obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 484cc55194b8..f409948e1a9b 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -72,6 +72,11 @@ void pci_no_aer(void)
 	pcie_aer_disable = 1;	/* has priority over 'forceload' */
 }
 
+bool pci_aer_available(void)
+{
+	return !pcie_aer_disable && pci_msi_enabled();
+}
+
 static int set_device_error_reporting(struct pci_dev *dev, void *data)
 {
 	bool enable = *((bool *)data);
@@ -411,9 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
  */
 static int __init aer_service_init(void)
 {
-	if (pcie_aer_disable)
-		return -ENXIO;
-	if (!pci_msi_enabled())
+	if (!pci_aer_available())
 		return -ENXIO;
 	return pcie_port_service_register(&aerdriver);
 }
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index f278d7b0d95d..2bb9b8972211 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -19,42 +19,6 @@
 #include <acpi/apei.h>
 #include "aerdrv.h"
 
-/**
- * aer_osc_setup - run ACPI _OSC method
- * @pciedev: pcie_device which AER is being enabled on
- *
- * @return: Zero on success. Nonzero otherwise.
- *
- * Invoked when PCIe bus loads AER service driver. To avoid conflict with
- * BIOS AER support requires BIOS to yield AER control to OS native driver.
- **/
-int aer_osc_setup(struct pcie_device *pciedev)
-{
-	acpi_status status = AE_NOT_FOUND;
-	struct pci_dev *pdev = pciedev->port;
-	acpi_handle handle = NULL;
-
-	if (acpi_pci_disabled)
-		return -1;
-
-	handle = acpi_find_root_bridge_handle(pdev);
-	if (handle) {
-		status = acpi_pci_osc_control_set(handle,
-					OSC_PCI_EXPRESS_AER_CONTROL |
-					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	}
-
-	if (ACPI_FAILURE(status)) {
-		dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
-			   "init device: %s\n",
-			   (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-			   "no _OSC support" : "_OSC failed");
-		return -1;
-	}
-
-	return 0;
-}
-
 #ifdef CONFIG_ACPI_APEI
 static inline int hest_match_pci(struct acpi_hest_aer_common *p,
 				 struct pci_dev *pci)
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index fc0b5a93e1de..29e268fadf14 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -772,22 +772,10 @@ void aer_isr(struct work_struct *work)
  */
 int aer_init(struct pcie_device *dev)
 {
-	if (pcie_aer_get_firmware_first(dev->port)) {
-		dev_printk(KERN_DEBUG, &dev->device,
-			   "PCIe errors handled by platform firmware.\n");
-		goto out;
-	}
-
-	if (aer_osc_setup(dev))
-		goto out;
-
-	return 0;
-out:
 	if (forceload) {
 		dev_printk(KERN_DEBUG, &dev->device,
 			   "aerdrv forceload requested.\n");
 		pcie_aer_force_firmware_first(dev->port, 0);
-		return 0;
 	}
-	return -ENXIO;
+	return 0;
 }
diff --git a/drivers/pci/pcie/pme/pcie_pme.c b/drivers/pci/pcie/pme.c
index bbdea18693d9..2f3c90407227 100644
--- a/drivers/pci/pcie/pme/pcie_pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -23,38 +23,13 @@
 #include <linux/pci-acpi.h>
 #include <linux/pm_runtime.h>
 
-#include "../../pci.h"
-#include "pcie_pme.h"
+#include "../pci.h"
+#include "portdrv.h"
 
 #define PCI_EXP_RTSTA_PME	0x10000 /* PME status */
 #define PCI_EXP_RTSTA_PENDING	0x20000 /* PME pending */
 
 /*
- * If set, this switch will prevent the PCIe root port PME service driver from
- * being registered.  Consequently, the interrupt-based PCIe PME signaling will
- * not be used by any PCIe root ports in that case.
- */
-static bool pcie_pme_disabled = true;
-
-/*
- * The PCI Express Base Specification 2.0, Section 6.1.8, states the following:
- * "In order to maintain compatibility with non-PCI Express-aware system
- * software, system power management logic must be configured by firmware to use
- * the legacy mechanism of signaling PME by default. PCI Express-aware system
- * software must notify the firmware prior to enabling native, interrupt-based
- * PME signaling."  However, if the platform doesn't provide us with a suitable
- * notification mechanism or the notification fails, it is not clear whether or
- * not we are supposed to use the interrupt-based PCIe PME signaling.  The
- * switch below can be used to indicate the desired behaviour.  When set, it
- * will make the kernel use the interrupt-based PCIe PME signaling regardless of
- * the platform notification status, although the kernel will attempt to notify
- * the platform anyway.  When unset, it will prevent the kernel from using the
- * the interrupt-based PCIe PME signaling if the platform notification fails,
- * which is the default.
- */
-static bool pcie_pme_force_enable;
-
-/*
  * If this switch is set, MSI will not be used for PCIe PME signaling.  This
  * causes the PCIe port driver to use INTx interrupts only, but it turns out
  * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
@@ -64,38 +39,13 @@ bool pcie_pme_msi_disabled;
 
 static int __init pcie_pme_setup(char *str)
 {
-	if (!strncmp(str, "auto", 4))
-		pcie_pme_disabled = false;
-	else if (!strncmp(str, "force", 5))
-		pcie_pme_force_enable = true;
-
-	str = strchr(str, ',');
-	if (str) {
-		str++;
-		str += strspn(str, " \t");
-		if (*str && !strcmp(str, "nomsi"))
-			pcie_pme_msi_disabled = true;
-	}
+	if (!strncmp(str, "nomsi", 5))
+		pcie_pme_msi_disabled = true;
 
 	return 1;
 }
 __setup("pcie_pme=", pcie_pme_setup);
 
-/**
- * pcie_pme_platform_setup - Ensure that the kernel controls the PCIe PME.
- * @srv: PCIe PME root port service to use for carrying out the check.
- *
- * Notify the platform that the native PCIe PME is going to be used and return
- * 'true' if the control of the PCIe PME registers has been acquired from the
- * platform.
- */
-static bool pcie_pme_platform_setup(struct pcie_device *srv)
-{
-	if (!pcie_pme_platform_notify(srv))
-		return true;
-	return pcie_pme_force_enable;
-}
-
 struct pcie_pme_service_data {
 	spinlock_t lock;
 	struct pcie_device *srv;
@@ -108,7 +58,7 @@ struct pcie_pme_service_data {
  * @dev: PCIe root port or event collector.
  * @enable: Enable or disable the interrupt.
  */
-static void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
 {
 	int rtctl_pos;
 	u16 rtctl;
@@ -417,9 +367,6 @@ static int pcie_pme_probe(struct pcie_device *srv)
 	struct pcie_pme_service_data *data;
 	int ret;
 
-	if (!pcie_pme_platform_setup(srv))
-		return -EACCES;
-
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -509,8 +456,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
  */
 static int __init pcie_pme_service_init(void)
 {
-	return pcie_pme_disabled ?
-		-ENODEV : pcie_port_service_register(&pcie_pme_driver);
+	return pcie_port_service_register(&pcie_pme_driver);
 }
 
 module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/pme/Makefile b/drivers/pci/pcie/pme/Makefile
deleted file mode 100644
index 8b9238053080..000000000000
--- a/drivers/pci/pcie/pme/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for PCI-Express Root Port PME signaling driver
-#
-
-obj-$(CONFIG_PCIE_PME) += pmedriver.o
-
-pmedriver-objs			:= pcie_pme.o
-pmedriver-$(CONFIG_ACPI)	+= pcie_pme_acpi.o
diff --git a/drivers/pci/pcie/pme/pcie_pme.h b/drivers/pci/pcie/pme/pcie_pme.h
deleted file mode 100644
index b30d2b7c9775..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * drivers/pci/pcie/pme/pcie_pme.h
- *
- * PCI Express Root Port PME signaling support
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- */
-
-#ifndef _PCIE_PME_H_
-#define _PCIE_PME_H_
-
-struct pcie_device;
-
-#ifdef CONFIG_ACPI
-extern int pcie_pme_acpi_setup(struct pcie_device *srv);
-
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-	return pcie_pme_acpi_setup(srv);
-}
-#else /* !CONFIG_ACPI */
-static inline int pcie_pme_platform_notify(struct pcie_device *srv)
-{
-	return 0;
-}
-#endif /* !CONFIG_ACPI */
-
-#endif
diff --git a/drivers/pci/pcie/pme/pcie_pme_acpi.c b/drivers/pci/pcie/pme/pcie_pme_acpi.c
deleted file mode 100644
index 83ab2287ae3f..000000000000
--- a/drivers/pci/pcie/pme/pcie_pme_acpi.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * PCIe Native PME support, ACPI-related part
- *
- * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License V2.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/acpi.h>
-#include <linux/pci-acpi.h>
-#include <linux/pcieport_if.h>
-
-/**
- * pcie_pme_acpi_setup - Request the ACPI BIOS to release control over PCIe PME.
- * @srv - PCIe PME service for a root port or event collector.
- *
- * Invoked when the PCIe bus type loads PCIe PME service driver.  To avoid
- * conflict with the BIOS PCIe support requires the BIOS to yield PCIe PME
- * control to the kernel.
- */
-int pcie_pme_acpi_setup(struct pcie_device *srv)
-{
-	acpi_status status = AE_NOT_FOUND;
-	struct pci_dev *port = srv->port;
-	acpi_handle handle;
-	int error = 0;
-
-	if (acpi_pci_disabled)
-		return -ENOSYS;
-
-	dev_info(&port->dev, "Requesting control of PCIe PME from ACPI BIOS\n");
-
-	handle = acpi_find_root_bridge_handle(port);
-	if (!handle)
-		return -EINVAL;
-
-	status = acpi_pci_osc_control_set(handle,
-			OSC_PCI_EXPRESS_PME_CONTROL |
-			OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-	if (ACPI_FAILURE(status)) {
-		dev_info(&port->dev,
-			"Failed to receive control of PCIe PME service: %s\n",
-			(status == AE_SUPPORT || status == AE_NOT_FOUND) ?
-			"no _OSC support" : "ACPI _OSC failed");
-		error = -ENODEV;
-	}
-
-	return error;
-}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 813a5c3427b6..7b5aba0a3291 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,6 +20,9 @@
 
 #define get_descriptor_id(type, service) (((type - 4) << 4) | service)
 
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+
 extern struct bus_type pcie_port_bus_type;
 extern int pcie_port_device_register(struct pci_dev *dev);
 #ifdef CONFIG_PM
@@ -30,6 +33,8 @@ extern void pcie_port_device_remove(struct pci_dev *dev);
 extern int __must_check pcie_port_bus_register(void);
 extern void pcie_port_bus_unregister(void);
 
+struct pci_dev;
+
 #ifdef CONFIG_PCIE_PME
 extern bool pcie_pme_msi_disabled;
 
@@ -42,9 +47,26 @@ static inline bool pcie_pme_no_msi(void)
 {
 	return pcie_pme_msi_disabled;
 }
+
+extern void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
 #else /* !CONFIG_PCIE_PME */
 static inline void pcie_pme_disable_msi(void) {}
 static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
 #endif /* !CONFIG_PCIE_PME */
 
+#ifdef CONFIG_ACPI
+extern int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+	return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+	return 0;
+}
+#endif /* !CONFIG_ACPI */
+
 #endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 000000000000..b7c4cb1ccb23
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,77 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device.  To avoid conflicts
+ * with the BIOS PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel.  The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+	acpi_status status;
+	acpi_handle handle;
+	u32 flags;
+
+	if (acpi_pci_disabled)
+		return 0;
+
+	handle = acpi_find_root_bridge_handle(port);
+	if (!handle)
+		return -EINVAL;
+
+	flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
+		| OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+		| OSC_PCI_EXPRESS_PME_CONTROL;
+
+	if (pci_aer_available()) {
+		if (pcie_aer_get_firmware_first(port))
+			dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
+		else
+			flags |= OSC_PCI_EXPRESS_AER_CONTROL;
+	}
+
+	status = acpi_pci_osc_control_set(handle, &flags,
+					OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+	if (ACPI_FAILURE(status)) {
+		dev_dbg(&port->dev, "ACPI _OSC request failed (code %d)\n",
+			status);
+		return -ENODEV;
+	}
+
+	dev_info(&port->dev, "ACPI _OSC control granted for 0x%02x\n", flags);
+
+	*srv_mask = PCIE_PORT_SERVICE_VC;
+	if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+		*srv_mask |= PCIE_PORT_SERVICE_HP;
+	if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+		*srv_mask |= PCIE_PORT_SERVICE_PME;
+	if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+		*srv_mask |= PCIE_PORT_SERVICE_AER;
+
+	return 0;
+}
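
Note: the new file requests a set of _OSC control bits up front and then derives the per-service mask from whatever the firmware actually granted. A standalone sketch of that flags-to-mask translation; the bit values here are illustrative, not the kernel's OSC_* constants:

#include <stdio.h>

enum { CTRL_HP = 1 << 0, CTRL_PME = 1 << 1, CTRL_AER = 1 << 2 };	/* granted bits */
enum { SVC_VC = 1 << 0, SVC_HP = 1 << 1, SVC_PME = 1 << 2, SVC_AER = 1 << 3 };

static int granted_to_services(unsigned int granted)
{
	int mask = SVC_VC;	/* VC needs no firmware hand-off */

	if (granted & CTRL_HP)
		mask |= SVC_HP;
	if (granted & CTRL_PME)
		mask |= SVC_PME;
	if (granted & CTRL_AER)
		mask |= SVC_AER;
	return mask;
}

int main(void)
{
	printf("0x%x\n", granted_to_services(CTRL_HP | CTRL_PME));	/* 0x7 */
	return 0;
}
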
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index e73effbe402c..a9c222d79ebc 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -14,6 +14,8 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
 
 #include "../pci.h"
 #include "portdrv.h"
@@ -236,24 +238,64 @@ static int get_port_device_capability(struct pci_dev *dev)
 	int services = 0, pos;
 	u16 reg16;
 	u32 reg32;
+	int cap_mask;
+	int err;
+
+	err = pcie_port_platform_notify(dev, &cap_mask);
+	if (pcie_ports_auto) {
+		if (err) {
+			pcie_no_aspm();
+			return 0;
+		}
+	} else {
+		cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+				| PCIE_PORT_SERVICE_VC;
+		if (pci_aer_available())
+			cap_mask |= PCIE_PORT_SERVICE_AER;
+	}
 
 	pos = pci_pcie_cap(dev);
 	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
 	/* Hot-Plug Capable */
-	if (reg16 & PCI_EXP_FLAGS_SLOT) {
+	if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) {
 		pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
-		if (reg32 & PCI_EXP_SLTCAP_HPC)
+		if (reg32 & PCI_EXP_SLTCAP_HPC) {
 			services |= PCIE_PORT_SERVICE_HP;
+			/*
+			 * Disable hot-plug interrupts in case they have been
+			 * enabled by the BIOS and the hot-plug service driver
+			 * is not loaded.
+			 */
+			pos += PCI_EXP_SLTCTL;
+			pci_read_config_word(dev, pos, &reg16);
+			reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+			pci_write_config_word(dev, pos, reg16);
+		}
 	}
 	/* AER capable */
-	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+	if ((cap_mask & PCIE_PORT_SERVICE_AER)
+	    && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
 		services |= PCIE_PORT_SERVICE_AER;
+		/*
+		 * Disable AER on this port in case it's been enabled by the
+		 * BIOS (the AER service driver will enable it when necessary).
+		 */
+		pci_disable_pcie_error_reporting(dev);
+	}
 	/* VC support */
 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
 		services |= PCIE_PORT_SERVICE_VC;
 	/* Root ports are capable of generating PME too */
-	if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+	if ((cap_mask & PCIE_PORT_SERVICE_PME)
+	    && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
 		services |= PCIE_PORT_SERVICE_PME;
+		/*
+		 * Disable PME interrupt on this port in case it's been enabled
+		 * by the BIOS (the PME service driver will enable it when
+		 * necessary).
+		 */
+		pcie_pme_interrupt_enable(dev, false);
+	}
 
 	return services;
 }
@@ -494,6 +536,9 @@ static void pcie_port_shutdown_service(struct device *dev) {}
  */
 int pcie_port_service_register(struct pcie_port_service_driver *new)
 {
+	if (pcie_ports_disabled)
+		return -ENODEV;
+
 	new->driver.name = (char *)new->name;
 	new->driver.bus = &pcie_port_bus_type;
 	new->driver.probe = pcie_port_probe_service;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3debed25e46b..f9033e190fb6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -15,6 +15,7 @@
 #include <linux/pcieport_if.h>
 #include <linux/aer.h>
 #include <linux/dmi.h>
+#include <linux/pci-aspm.h>
 
 #include "portdrv.h"
 #include "aer/aerdrv.h"
@@ -29,6 +30,31 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+	if (!strncmp(str, "compat", 6)) {
+		pcie_ports_disabled = true;
+	} else if (!strncmp(str, "native", 6)) {
+		pcie_ports_disabled = false;
+		pcie_ports_auto = false;
+	} else if (!strncmp(str, "auto", 4)) {
+		pcie_ports_disabled = false;
+		pcie_ports_auto = true;
+	}
+
+	return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
 /* global data */
 
 static int pcie_portdrv_restore_config(struct pci_dev *dev)
@@ -301,6 +327,11 @@ static int __init pcie_portdrv_init(void)
 {
 	int retval;
 
+	if (pcie_ports_disabled) {
+		pcie_no_aspm();
+		return -EACCES;
+	}
+
 	dmi_check_system(pcie_portdrv_dmi_table);
 
 	retval = pcie_port_bus_register();
@@ -315,11 +346,4 @@ static int __init pcie_portdrv_init(void)
 	return retval;
 }
 
-static void __exit pcie_portdrv_exit(void)
-{
-	pci_unregister_driver(&pcie_portdriver);
-	pcie_port_bus_unregister();
-}
-
 module_init(pcie_portdrv_init);
-module_exit(pcie_portdrv_exit);
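
Note: the new pcie_ports= handler above follows the standard early-param idiom — match a literal prefix with strncmp and flip module-scope booleans, with "auto" as the default state. A standalone sketch of the same three-way parse, outside the kernel's __setup machinery:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ports_disabled;
static bool ports_auto = true;

static void parse_pcie_ports(const char *str)
{
	if (!strncmp(str, "compat", 6)) {
		ports_disabled = true;
	} else if (!strncmp(str, "native", 6)) {
		ports_disabled = false;
		ports_auto = false;
	} else if (!strncmp(str, "auto", 4)) {
		ports_disabled = false;
		ports_auto = true;
	}
}

int main(void)
{
	parse_pcie_ports("native");
	printf("disabled=%d auto=%d\n", ports_disabled, ports_auto);
	return 0;
}
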
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 659eaa0fc48f..968cfea04f74 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -49,7 +49,7 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
 }
 
 /* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
+static const char *pci_bus_speed_strings[] = {
 	"33 MHz PCI",		/* 0x00 */
 	"66 MHz PCI",		/* 0x01 */
 	"66 MHz PCI-X",		/* 0x02 */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 044f430f3b43..cff7cc2c1f02 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -486,10 +486,12 @@ config TOPSTAR_LAPTOP
 config ACPI_TOSHIBA
 	tristate "Toshiba Laptop Extras"
 	depends on ACPI
+	depends on LEDS_CLASS
+	depends on NEW_LEDS
+	depends on BACKLIGHT_CLASS_DEVICE
 	depends on INPUT
 	depends on RFKILL || RFKILL = n
 	select INPUT_POLLDEV
-	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver adds support for access to certain system settings
 	  on "legacy free" Toshiba laptops.  These laptops can be recognized by
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index e058c2ba2a15..ca05aefd03bf 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -938,10 +938,11 @@ static int set_brightness(int value)
 	/* SPLV laptop */
 	if (hotk->methods->brightness_set) {
 		if (!write_acpi_int(hotk->handle, hotk->methods->brightness_set,
-				    value, NULL))
+				    value, NULL)) {
 			printk(KERN_WARNING
 			       "Asus ACPI: Error changing brightness\n");
 			ret = -EIO;
+		}
 		goto out;
 	}
 
@@ -953,10 +954,11 @@ static int set_brightness(int value)
 				      hotk->methods->brightness_down,
 				      NULL, NULL);
 		(value > 0) ? value-- : value++;
-		if (ACPI_FAILURE(status))
+		if (ACPI_FAILURE(status)) {
 			printk(KERN_WARNING
 			       "Asus ACPI: Error changing brightness\n");
 			ret = -EIO;
+		}
 	}
 out:
 	return ret;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index d071ce056322..097083cac413 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -841,6 +841,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = {
 		.callback = dmi_check_cb
 	},
 	{
+		.ident = "Dell Mini 1012",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+		},
+		.callback = dmi_check_cb
+	},
+	{
 		.ident = "Dell Inspiron 11z",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1092,5 +1100,6 @@ MODULE_ALIAS("dmi:*:rnJHL90:rvrREFERENCE:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*");
+MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*");
 MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*");
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b41ed5cab3e7..4413975912e0 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -122,6 +122,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
 		},
 	},
 	{
+		.ident = "Dell Mini 1012",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+		},
+	},
+	{
 		.ident = "Dell Inspiron 11z",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f15516374987..c1741142a4cb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -79,12 +79,13 @@ struct bios_args {
 	u32 command;
 	u32 commandtype;
 	u32 datasize;
-	char *data;
+	u32 data;
 };
 
 struct bios_return {
 	u32 sigpass;
 	u32 return_code;
+	u32 value;
 };
 
 struct key_entry {
@@ -148,7 +149,7 @@ static struct platform_driver hp_wmi_driver = {
  *	   buffer = kzalloc(128, GFP_KERNEL);
  *	   ret = hp_wmi_perform_query(0x7, 0, buffer, 128)
  */
-static int hp_wmi_perform_query(int query, int write, char *buffer,
+static int hp_wmi_perform_query(int query, int write, u32 *buffer,
 				int buffersize)
 {
 	struct bios_return bios_return;
@@ -159,7 +160,7 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
 		.command = write ? 0x2 : 0x1,
 		.commandtype = query,
 		.datasize = buffersize,
-		.data = buffer,
+		.data = *buffer,
 	};
 	struct acpi_buffer input = { sizeof(struct bios_args), &args };
 	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -177,29 +178,14 @@ static int hp_wmi_perform_query(int query, int write, char *buffer,
 
 	bios_return = *((struct bios_return *)obj->buffer.pointer);
 
-	if (bios_return.return_code) {
-		printk(KERN_WARNING PREFIX "Query %d returned %d\n", query,
-		       bios_return.return_code);
-		kfree(obj);
-		return bios_return.return_code;
-	}
-	if (obj->buffer.length - sizeof(bios_return) > buffersize) {
-		kfree(obj);
-		return -EINVAL;
-	}
-
-	memset(buffer, 0, buffersize);
-	memcpy(buffer,
-	       ((char *)obj->buffer.pointer) + sizeof(struct bios_return),
-	       obj->buffer.length - sizeof(bios_return));
-	kfree(obj);
+	memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
 	return 0;
 }
 
 static int hp_wmi_display_state(void)
 {
-	int state;
-	int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, (char *)&state,
+	int state = 0;
+	int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
 				       sizeof(state));
 	if (ret)
 		return -EINVAL;
@@ -208,8 +194,8 @@ static int hp_wmi_display_state(void)
208 194
209static int hp_wmi_hddtemp_state(void) 195static int hp_wmi_hddtemp_state(void)
210{ 196{
211 int state; 197 int state = 0;
212 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, (char *)&state, 198 int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
213 sizeof(state)); 199 sizeof(state));
214 if (ret) 200 if (ret)
215 return -EINVAL; 201 return -EINVAL;
@@ -218,8 +204,8 @@ static int hp_wmi_hddtemp_state(void)
218 204
219static int hp_wmi_als_state(void) 205static int hp_wmi_als_state(void)
220{ 206{
221 int state; 207 int state = 0;
222 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, (char *)&state, 208 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
223 sizeof(state)); 209 sizeof(state));
224 if (ret) 210 if (ret)
225 return -EINVAL; 211 return -EINVAL;
@@ -228,8 +214,8 @@ static int hp_wmi_als_state(void)
228 214
229static int hp_wmi_dock_state(void) 215static int hp_wmi_dock_state(void)
230{ 216{
231 int state; 217 int state = 0;
232 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, 218 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
233 sizeof(state)); 219 sizeof(state));
234 220
235 if (ret) 221 if (ret)
@@ -240,8 +226,8 @@ static int hp_wmi_dock_state(void)
240 226
241static int hp_wmi_tablet_state(void) 227static int hp_wmi_tablet_state(void)
242{ 228{
243 int state; 229 int state = 0;
244 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, (char *)&state, 230 int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
245 sizeof(state)); 231 sizeof(state));
246 if (ret) 232 if (ret)
247 return ret; 233 return ret;
@@ -256,7 +242,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
256 int ret; 242 int ret;
257 243
258 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, 244 ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
259 (char *)&query, sizeof(query)); 245 &query, sizeof(query));
260 if (ret) 246 if (ret)
261 return -EINVAL; 247 return -EINVAL;
262 return 0; 248 return 0;
@@ -268,10 +254,10 @@ static const struct rfkill_ops hp_wmi_rfkill_ops = {
268 254
269static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) 255static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
270{ 256{
271 int wireless; 257 int wireless = 0;
272 int mask; 258 int mask;
273 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 259 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
274 (char *)&wireless, sizeof(wireless)); 260 &wireless, sizeof(wireless));
275 /* TBD: Pass error */ 261 /* TBD: Pass error */
276 262
277 mask = 0x200 << (r * 8); 263 mask = 0x200 << (r * 8);
@@ -284,10 +270,10 @@ static bool hp_wmi_get_sw_state(enum hp_wmi_radio r)
284 270
285static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) 271static bool hp_wmi_get_hw_state(enum hp_wmi_radio r)
286{ 272{
287 int wireless; 273 int wireless = 0;
288 int mask; 274 int mask;
289 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, 275 hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0,
290 (char *)&wireless, sizeof(wireless)); 276 &wireless, sizeof(wireless));
291 /* TBD: Pass error */ 277 /* TBD: Pass error */
292 278
293 mask = 0x800 << (r * 8); 279 mask = 0x800 << (r * 8);
@@ -347,7 +333,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
347 const char *buf, size_t count) 333 const char *buf, size_t count)
348{ 334{
349 u32 tmp = simple_strtoul(buf, NULL, 10); 335 u32 tmp = simple_strtoul(buf, NULL, 10);
350 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, (char *)&tmp, 336 int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
351 sizeof(tmp)); 337 sizeof(tmp));
352 if (ret) 338 if (ret)
353 return -EINVAL; 339 return -EINVAL;
@@ -421,7 +407,7 @@ static void hp_wmi_notify(u32 value, void *context)
421 static struct key_entry *key; 407 static struct key_entry *key;
422 union acpi_object *obj; 408 union acpi_object *obj;
423 u32 event_id, event_data; 409 u32 event_id, event_data;
424 int key_code, ret; 410 int key_code = 0, ret;
425 u32 *location; 411 u32 *location;
426 acpi_status status; 412 acpi_status status;
427 413
@@ -475,7 +461,7 @@ static void hp_wmi_notify(u32 value, void *context)
475 break; 461 break;
476 case HPWMI_BEZEL_BUTTON: 462 case HPWMI_BEZEL_BUTTON:
477 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, 463 ret = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0,
478 (char *)&key_code, 464 &key_code,
479 sizeof(key_code)); 465 sizeof(key_code));
480 if (ret) 466 if (ret)
481 break; 467 break;
@@ -578,9 +564,9 @@ static void cleanup_sysfs(struct platform_device *device)
578static int __devinit hp_wmi_bios_setup(struct platform_device *device) 564static int __devinit hp_wmi_bios_setup(struct platform_device *device)
579{ 565{
580 int err; 566 int err;
581 int wireless; 567 int wireless = 0;
582 568
583 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, (char *)&wireless, 569 err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 0, &wireless,
584 sizeof(wireless)); 570 sizeof(wireless));
585 if (err) 571 if (err)
586 return err; 572 return err;
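
The hp-wmi hunks above change the BIOS calling convention from a kernel pointer (char *data) to an inline u32: the firmware takes a fixed 32-bit argument by value, and a matching 32-bit result slot (value) is added to the return struct. Callers now pre-initialize their output so a failed query cannot leave stack garbage behind. A minimal sketch of that convention; all names are illustrative, not the driver's actual symbols:

/* Sketch of the fixed-width WMI argument convention adopted above. */
#include <linux/types.h>
#include <linux/errno.h>

struct demo_bios_args {
        u32 signature;
        u32 command;     /* 0x1 = read, 0x2 = write */
        u32 commandtype; /* query selector */
        u32 datasize;
        u32 data;        /* payload travels by value, not by pointer */
};

struct demo_bios_return {
        u32 sigpass;
        u32 return_code;
        u32 value;       /* single u32 result copied back to the caller */
};

/* Stand-in for the ACPI/WMI call; the real helper fills *buffer
 * from bios_return.value as in the hunk above. */
static int demo_perform_query(int query, int write, u32 *buffer,
                              int buffersize)
{
        *buffer = 0x1;
        return 0;
}

static int demo_read_dock_state(void)
{
        u32 state = 0;   /* pre-initialized: a failed query leaves 0 */

        if (demo_perform_query(0x4, 0, &state, sizeof(state)))
                return -EINVAL;
        return state & 0x1;
}
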
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index afe82e50dfea..9024480a8228 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1342,8 +1342,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips)
1342 limits = &ips_lv_limits; 1342 limits = &ips_lv_limits;
1343 else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) 1343 else if (strstr(boot_cpu_data.x86_model_id, "CPU U"))
1344 limits = &ips_ulv_limits; 1344 limits = &ips_ulv_limits;
1345 else 1345 else {
1346 dev_info(&ips->dev->dev, "No CPUID match found.\n"); 1346 dev_info(&ips->dev->dev, "No CPUID match found.\n");
1347 goto out;
1348 }
1347 1349
1348 rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); 1350 rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power);
1349 tdp = turbo_power & TURBO_TDP_MASK; 1351 tdp = turbo_power & TURBO_TDP_MASK;
@@ -1432,6 +1434,12 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1432 1434
1433 spin_lock_init(&ips->turbo_status_lock); 1435 spin_lock_init(&ips->turbo_status_lock);
1434 1436
1437 ret = pci_enable_device(dev);
1438 if (ret) {
1439 dev_err(&dev->dev, "can't enable PCI device, aborting\n");
1440 goto error_free;
1441 }
1442
1435 if (!pci_resource_start(dev, 0)) { 1443 if (!pci_resource_start(dev, 0)) {
1436 dev_err(&dev->dev, "TBAR not assigned, aborting\n"); 1444 dev_err(&dev->dev, "TBAR not assigned, aborting\n");
1437 ret = -ENXIO; 1445 ret = -ENXIO;
@@ -1444,11 +1452,6 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
1444 goto error_free; 1452 goto error_free;
1445 } 1453 }
1446 1454
1447 ret = pci_enable_device(dev);
1448 if (ret) {
1449 dev_err(&dev->dev, "can't enable PCI device, aborting\n");
1450 goto error_free;
1451 }
1452 1455
1453 ips->regmap = ioremap(pci_resource_start(dev, 0), 1456 ips->regmap = ioremap(pci_resource_start(dev, 0),
1454 pci_resource_len(dev, 0)); 1457 pci_resource_len(dev, 0));
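
The intel_ips hunk reorders probe so that pci_enable_device() runs before the pci_resource_start() check: BAR assignments are only trustworthy once the device is enabled, so testing resources first can fail spuriously. A sketch of the usual probe ordering for a hypothetical driver (the error labels are illustrative, not intel_ips' actual cleanup paths):

#include <linux/pci.h>

static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);      /* 1: enable first */
        if (ret)
                return ret;

        if (!pci_resource_start(dev, 0)) { /* 2: only now trust the BARs */
                ret = -ENXIO;
                goto err_disable;
        }

        ret = pci_request_regions(dev, "demo");
        if (ret)
                goto err_disable;

        return 0;

err_disable:
        pci_disable_device(dev);
        return ret;
}
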
diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c
index 73f8e6d72669..2b11a33325e6 100644
--- a/drivers/platform/x86/intel_rar_register.c
+++ b/drivers/platform/x86/intel_rar_register.c
@@ -145,7 +145,7 @@ static void free_rar_device(struct rar_device *rar)
145 */ 145 */
146static struct rar_device *_rar_to_device(int rar, int *off) 146static struct rar_device *_rar_to_device(int rar, int *off)
147{ 147{
148 if (rar >= 0 && rar <= 3) { 148 if (rar >= 0 && rar < MRST_NUM_RAR) {
149 *off = rar; 149 *off = rar;
150 return &my_rar_device; 150 return &my_rar_device;
151 } 151 }
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 943f9084dcb1..6abe18e638e9 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -487,7 +487,7 @@ int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data)
487 mdelay(1); 487 mdelay(1);
488 *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR); 488 *data = readl(ipcdev.i2c_base + I2C_DATA_ADDR);
489 } else if (cmd == IPC_I2C_WRITE) { 489 } else if (cmd == IPC_I2C_WRITE) {
490 writel(addr, ipcdev.i2c_base + I2C_DATA_ADDR); 490 writel(*data, ipcdev.i2c_base + I2C_DATA_ADDR);
491 mdelay(1); 491 mdelay(1);
492 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR); 492 writel(addr, ipcdev.i2c_base + IPC_I2C_CNTRL_ADDR);
493 } else { 493 } else {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 5d6119bed00c..e35ed128bdef 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
1911 TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, 1911 TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
1912 TP_ACPI_HOTKEYSCAN_MUTE, 1912 TP_ACPI_HOTKEYSCAN_MUTE,
1913 TP_ACPI_HOTKEYSCAN_THINKPAD, 1913 TP_ACPI_HOTKEYSCAN_THINKPAD,
1914 TP_ACPI_HOTKEYSCAN_UNK1,
1915 TP_ACPI_HOTKEYSCAN_UNK2,
1916 TP_ACPI_HOTKEYSCAN_UNK3,
1917 TP_ACPI_HOTKEYSCAN_UNK4,
1918 TP_ACPI_HOTKEYSCAN_UNK5,
1919 TP_ACPI_HOTKEYSCAN_UNK6,
1920 TP_ACPI_HOTKEYSCAN_UNK7,
1921 TP_ACPI_HOTKEYSCAN_UNK8,
1922
1923 /* Hotkey keymap size */
1924 TPACPI_HOTKEY_MAP_LEN
1914}; 1925};
1915 1926
1916enum { /* Keys/events available through NVRAM polling */ 1927enum { /* Keys/events available through NVRAM polling */
@@ -3082,6 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3082 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ 3093 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3083}; 3094};
3084 3095
3096typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN];
3097
3085static int __init hotkey_init(struct ibm_init_struct *iibm) 3098static int __init hotkey_init(struct ibm_init_struct *iibm)
3086{ 3099{
3087 /* Requirements for changing the default keymaps: 3100 /* Requirements for changing the default keymaps:
@@ -3113,9 +3126,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3113 * If the above is too much to ask, don't change the keymap. 3126 * If the above is too much to ask, don't change the keymap.
3114 * Ask the thinkpad-acpi maintainer to do it, instead. 3127 * Ask the thinkpad-acpi maintainer to do it, instead.
3115 */ 3128 */
3116 static u16 ibm_keycode_map[] __initdata = { 3129
3130 enum keymap_index {
3131 TPACPI_KEYMAP_IBM_GENERIC = 0,
3132 TPACPI_KEYMAP_LENOVO_GENERIC,
3133 };
3134
3135 static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = {
3136 /* Generic keymap for IBM ThinkPads */
3137 [TPACPI_KEYMAP_IBM_GENERIC] = {
3117 /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ 3138 /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
3118 KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP, 3139 KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP,
3119 KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, 3140 KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
3120 KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, 3141 KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
3121 3142
@@ -3146,11 +3167,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3146 /* (assignments unknown, please report if found) */ 3167 /* (assignments unknown, please report if found) */
3147 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3168 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
3148 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3169 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
3149 }; 3170 },
3150 static u16 lenovo_keycode_map[] __initdata = { 3171
3172 /* Generic keymap for Lenovo ThinkPads */
3173 [TPACPI_KEYMAP_LENOVO_GENERIC] = {
3151 /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ 3174 /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
3152 KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, 3175 KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
3153 KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, 3176 KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
3154 KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, 3177 KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
3155 3178
3156 /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ 3179 /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
@@ -3189,11 +3212,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3189 /* (assignments unknown, please report if found) */ 3212 /* (assignments unknown, please report if found) */
3190 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3213 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
3191 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, 3214 KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
3215 },
3216 };
3217
3218 static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = {
3219 /* Generic maps (fallback) */
3220 {
3221 .vendor = PCI_VENDOR_ID_IBM,
3222 .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
3223 .quirks = TPACPI_KEYMAP_IBM_GENERIC,
3224 },
3225 {
3226 .vendor = PCI_VENDOR_ID_LENOVO,
3227 .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY,
3228 .quirks = TPACPI_KEYMAP_LENOVO_GENERIC,
3229 },
3192 }; 3230 };
3193 3231
3194#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) 3232#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t)
3195#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) 3233#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0])
3196#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0])
3197 3234
3198 int res, i; 3235 int res, i;
3199 int status; 3236 int status;
@@ -3202,6 +3239,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3202 bool tabletsw_state = false; 3239 bool tabletsw_state = false;
3203 3240
3204 unsigned long quirks; 3241 unsigned long quirks;
3242 unsigned long keymap_id;
3205 3243
3206 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3244 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3207 "initializing hotkey subdriver\n"); 3245 "initializing hotkey subdriver\n");
@@ -3342,7 +3380,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3342 goto err_exit; 3380 goto err_exit;
3343 3381
3344 /* Set up key map */ 3382 /* Set up key map */
3345
3346 hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, 3383 hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
3347 GFP_KERNEL); 3384 GFP_KERNEL);
3348 if (!hotkey_keycode_map) { 3385 if (!hotkey_keycode_map) {
@@ -3352,17 +3389,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3352 goto err_exit; 3389 goto err_exit;
3353 } 3390 }
3354 3391
3355 if (tpacpi_is_lenovo()) { 3392 keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable,
3356 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3393 ARRAY_SIZE(tpacpi_keymap_qtable));
3357 "using Lenovo default hot key map\n"); 3394 BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps));
3358 memcpy(hotkey_keycode_map, &lenovo_keycode_map, 3395 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3359 TPACPI_HOTKEY_MAP_SIZE); 3396 "using keymap number %lu\n", keymap_id);
3360 } else { 3397
3361 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3398 memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id],
3362 "using IBM default hot key map\n"); 3399 TPACPI_HOTKEY_MAP_SIZE);
3363 memcpy(hotkey_keycode_map, &ibm_keycode_map,
3364 TPACPI_HOTKEY_MAP_SIZE);
3365 }
3366 3400
3367 input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); 3401 input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN);
3368 tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; 3402 tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE;
@@ -3469,7 +3503,8 @@ static bool hotkey_notify_hotkey(const u32 hkey,
3469 *send_acpi_ev = true; 3503 *send_acpi_ev = true;
3470 *ignore_acpi_ev = false; 3504 *ignore_acpi_ev = false;
3471 3505
3472 if (scancode > 0 && scancode < 0x21) { 3506 /* HKEY event 0x1001 is scancode 0x00 */
3507 if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) {
3473 scancode--; 3508 scancode--;
3474 if (!(hotkey_source_mask & (1 << scancode))) { 3509 if (!(hotkey_source_mask & (1 << scancode))) {
3475 tpacpi_input_send_key_masked(scancode); 3510 tpacpi_input_send_key_masked(scancode);
@@ -6080,13 +6115,18 @@ static struct backlight_ops ibm_backlight_data = {
6080 6115
6081/* --------------------------------------------------------------------- */ 6116/* --------------------------------------------------------------------- */
6082 6117
6118/*
6119 * Call _BCL method of video device. On some ThinkPads this will
6120 * switch the firmware to the ACPI brightness control mode.
6121 */
6122
6083static int __init tpacpi_query_bcl_levels(acpi_handle handle) 6123static int __init tpacpi_query_bcl_levels(acpi_handle handle)
6084{ 6124{
6085 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 6125 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
6086 union acpi_object *obj; 6126 union acpi_object *obj;
6087 int rc; 6127 int rc;
6088 6128
6089 if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) { 6129 if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
6090 obj = (union acpi_object *)buffer.pointer; 6130 obj = (union acpi_object *)buffer.pointer;
6091 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) { 6131 if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
6092 printk(TPACPI_ERR "Unknown _BCL data, " 6132 printk(TPACPI_ERR "Unknown _BCL data, "
@@ -6103,55 +6143,22 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
6103 return rc; 6143 return rc;
6104} 6144}
6105 6145
6106static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
6107 u32 lvl, void *context, void **rv)
6108{
6109 char name[ACPI_PATH_SEGMENT_LENGTH];
6110 struct acpi_buffer buffer = { sizeof(name), &name };
6111
6112 if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
6113 !strncmp("_BCL", name, sizeof(name) - 1)) {
6114 BUG_ON(!rv || !*rv);
6115 **(int **)rv = tpacpi_query_bcl_levels(handle);
6116 return AE_CTRL_TERMINATE;
6117 } else {
6118 return AE_OK;
6119 }
6120}
6121 6146
6122/* 6147/*
6123 * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map 6148 * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
6124 */ 6149 */
6125static unsigned int __init tpacpi_check_std_acpi_brightness_support(void) 6150static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
6126{ 6151{
6127 int status; 6152 acpi_handle video_device;
6128 int bcl_levels = 0; 6153 int bcl_levels = 0;
6129 void *bcl_ptr = &bcl_levels;
6130
6131 if (!vid_handle)
6132 TPACPI_ACPIHANDLE_INIT(vid);
6133
6134 if (!vid_handle)
6135 return 0;
6136
6137 /*
6138 * Search for a _BCL method, and execute it. This is safe on all
6139 * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
6140 * BIOS in ACPI backlight control mode. We do NOT have to care
6141 * about calling the _BCL method in an enabled video device, any
6142 * will do for our purposes.
6143 */
6144 6154
6145 status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3, 6155 tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
6146 tpacpi_acpi_walk_find_bcl, NULL, NULL, 6156 if (video_device)
6147 &bcl_ptr); 6157 bcl_levels = tpacpi_query_bcl_levels(video_device);
6148 6158
6149 if (ACPI_SUCCESS(status) && bcl_levels > 2) { 6159 tp_features.bright_acpimode = (bcl_levels > 0);
6150 tp_features.bright_acpimode = 1;
6151 return bcl_levels - 2;
6152 }
6153 6160
6154 return 0; 6161 return (bcl_levels > 2) ? (bcl_levels - 2) : 0;
6155} 6162}
6156 6163
6157/* 6164/*
@@ -6244,28 +6251,6 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6244 if (tp_features.bright_unkfw) 6251 if (tp_features.bright_unkfw)
6245 return 1; 6252 return 1;
6246 6253
6247 if (tp_features.bright_acpimode) {
6248 if (acpi_video_backlight_support()) {
6249 if (brightness_enable > 1) {
6250 printk(TPACPI_NOTICE
6251 "Standard ACPI backlight interface "
6252 "available, not loading native one.\n");
6253 return 1;
6254 } else if (brightness_enable == 1) {
6255 printk(TPACPI_NOTICE
6256 "Backlight control force enabled, even if standard "
6257 "ACPI backlight interface is available\n");
6258 }
6259 } else {
6260 if (brightness_enable > 1) {
6261 printk(TPACPI_NOTICE
6262 "Standard ACPI backlight interface not "
6263 "available, thinkpad_acpi native "
6264 "brightness control enabled\n");
6265 }
6266 }
6267 }
6268
6269 if (!brightness_enable) { 6254 if (!brightness_enable) {
6270 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 6255 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6271 "brightness support disabled by " 6256 "brightness support disabled by "
@@ -6273,6 +6258,26 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6273 return 1; 6258 return 1;
6274 } 6259 }
6275 6260
6261 if (acpi_video_backlight_support()) {
6262 if (brightness_enable > 1) {
6263 printk(TPACPI_INFO
6264 "Standard ACPI backlight interface "
6265 "available, not loading native one.\n");
6266 return 1;
6267 } else if (brightness_enable == 1) {
6268 printk(TPACPI_WARN
6269 "Cannot enable backlight brightness support, "
6270 "ACPI is already handling it. Refer to the "
6271 "acpi_backlight kernel parameter\n");
6272 return 1;
6273 }
6274 } else if (tp_features.bright_acpimode && brightness_enable > 1) {
6275 printk(TPACPI_NOTICE
6276 "Standard ACPI backlight interface not "
6277 "available, thinkpad_acpi native "
6278 "brightness control enabled\n");
6279 }
6280
6276 /* 6281 /*
6277 * Check for module parameter bogosity, note that we 6282 * Check for module parameter bogosity, note that we
6278 * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be 6283 * init brightness_mode to TPACPI_BRGHT_MODE_MAX in order to be
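
The thinkpad-acpi rework above replaces the tpacpi_is_lenovo() branch with a quirk table that maps a vendor to a keymap index, so a future model-specific keymap is one table entry rather than new control flow. The lookup-then-memcpy pattern, reduced to a self-contained userspace sketch with made-up data:

#include <stdio.h>
#include <string.h>

#define MAP_LEN 4
typedef unsigned short keymap_t[MAP_LEN];

enum { MAP_IBM, MAP_LENOVO, NUM_MAPS };

static const keymap_t keymaps[NUM_MAPS] = {
        [MAP_IBM]    = { 1, 2, 3, 4 },
        [MAP_LENOVO] = { 1, 9, 3, 4 },
};

struct quirk { unsigned vendor; unsigned long map_id; };

static const struct quirk qtable[] = {
        { .vendor = 0x1014, .map_id = MAP_IBM },    /* PCI_VENDOR_ID_IBM */
        { .vendor = 0x17aa, .map_id = MAP_LENOVO }, /* PCI_VENDOR_ID_LENOVO */
};

int main(void)
{
        unsigned vendor = 0x17aa;  /* pretend DMI reports Lenovo */
        keymap_t active;
        size_t i;

        memcpy(active, keymaps[MAP_IBM], sizeof(active)); /* fallback */
        for (i = 0; i < sizeof(qtable) / sizeof(qtable[0]); i++)
                if (qtable[i].vendor == vendor)
                        memcpy(active, keymaps[qtable[i].map_id],
                               sizeof(active));

        printf("key[1] = %u\n", (unsigned)active[1]); /* prints 9 */
        return 0;
}

Any unmatched vendor keeps the IBM fallback, mirroring the generic entries in the qtable above.
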
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index c6cbcb3f925e..0e9a309b9669 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,12 +16,11 @@
16 16
17#ifdef CONFIG_MAGIC_SYSRQ 17#ifdef CONFIG_MAGIC_SYSRQ
18static int ctrlchar_sysrq_key; 18static int ctrlchar_sysrq_key;
19static struct tty_struct *sysrq_tty;
20 19
21static void 20static void
22ctrlchar_handle_sysrq(struct work_struct *work) 21ctrlchar_handle_sysrq(struct work_struct *work)
23{ 22{
24 handle_sysrq(ctrlchar_sysrq_key, sysrq_tty); 23 handle_sysrq(ctrlchar_sysrq_key);
25} 24}
26 25
27static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq); 26static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
@@ -54,7 +53,6 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
54 /* racy */ 53 /* racy */
55 if (len == 3 && buf[1] == '-') { 54 if (len == 3 && buf[1] == '-') {
56 ctrlchar_sysrq_key = buf[2]; 55 ctrlchar_sysrq_key = buf[2];
57 sysrq_tty = tty;
58 schedule_work(&ctrlchar_work); 56 schedule_work(&ctrlchar_work);
59 return CTRLCHAR_SYSRQ; 57 return CTRLCHAR_SYSRQ;
60 } 58 }
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 18d9a497863b..8cd58e412b5e 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -305,7 +305,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
305 if (kbd->sysrq) { 305 if (kbd->sysrq) {
306 if (kbd->sysrq == K(KT_LATIN, '-')) { 306 if (kbd->sysrq == K(KT_LATIN, '-')) {
307 kbd->sysrq = 0; 307 kbd->sysrq = 0;
308 handle_sysrq(value, kbd->tty); 308 handle_sysrq(value);
309 return; 309 return;
310 } 310 }
311 if (value == '-') { 311 if (value == '-') {
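
The s390 hunks track a tree-wide signature change: handle_sysrq() no longer takes a tty, so callers that defer the call to a workqueue no longer need to stash a tty pointer across the deferral (which is why the racy sysrq_tty static disappears). The surviving pattern, sketched with illustrative names:

#include <linux/workqueue.h>
#include <linux/sysrq.h>

static int demo_sysrq_key;

static void demo_handle_sysrq(struct work_struct *work)
{
        handle_sysrq(demo_sysrq_key);  /* no tty argument anymore */
}

static DECLARE_WORK(demo_work, demo_handle_sysrq);

static void demo_on_ctrl_sequence(unsigned char key)
{
        demo_sysrq_key = key;          /* only the key byte survives */
        schedule_work(&demo_work);
}
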
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 95a895dd4f13..c8dc392edd57 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -56,6 +56,7 @@
56#include <linux/delay.h> 56#include <linux/delay.h>
57#include <linux/dma-mapping.h> 57#include <linux/dma-mapping.h>
58#include <linux/timer.h> 58#include <linux/timer.h>
59#include <linux/slab.h>
59#include <linux/pci.h> 60#include <linux/pci.h>
60#include <linux/aer.h> 61#include <linux/aer.h>
61#include <asm/dma.h> 62#include <asm/dma.h>
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index f065204e401b..95a26fb1626c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -132,7 +132,7 @@ void qla4_8xxx_idc_unlock(struct scsi_qla_host *ha);
132int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha); 132int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
133void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha); 133void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
134void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha); 134void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
135inline void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha); 135void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
136 136
137extern int ql4xextended_error_logging; 137extern int ql4xextended_error_logging;
138extern int ql4xdiscoverywait; 138extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e031a734836e..5d4a3822382d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1418,7 +1418,7 @@ static int qla4_8xxx_rcvpeg_ready(struct scsi_qla_host *ha)
1418 return QLA_SUCCESS; 1418 return QLA_SUCCESS;
1419} 1419}
1420 1420
1421inline void 1421void
1422qla4_8xxx_set_drv_active(struct scsi_qla_host *ha) 1422qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1423{ 1423{
1424 uint32_t drv_active; 1424 uint32_t drv_active;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 7356a56ac458..be0ebce36e54 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -869,7 +869,9 @@ static int get_serial_info(struct m68k_serial * info,
869 tmp.close_delay = info->close_delay; 869 tmp.close_delay = info->close_delay;
870 tmp.closing_wait = info->closing_wait; 870 tmp.closing_wait = info->closing_wait;
871 tmp.custom_divisor = info->custom_divisor; 871 tmp.custom_divisor = info->custom_divisor;
872 copy_to_user(retinfo,&tmp,sizeof(*retinfo)); 872 if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
873 return -EFAULT;
874
873 return 0; 875 return 0;
874} 876}
875 877
@@ -882,7 +884,8 @@ static int set_serial_info(struct m68k_serial * info,
882 884
883 if (!new_info) 885 if (!new_info)
884 return -EFAULT; 886 return -EFAULT;
885 copy_from_user(&new_serial,new_info,sizeof(new_serial)); 887 if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
888 return -EFAULT;
886 old_info = *info; 889 old_info = *info;
887 890
888 if (!capable(CAP_SYS_ADMIN)) { 891 if (!capable(CAP_SYS_ADMIN)) {
@@ -943,8 +946,7 @@ static int get_lsr_info(struct m68k_serial * info, unsigned int *value)
943 status = 0; 946 status = 0;
944#endif 947#endif
945 local_irq_restore(flags); 948 local_irq_restore(flags);
946 put_user(status,value); 949 return put_user(status, value);
947 return 0;
948} 950}
949 951
950/* 952/*
@@ -999,27 +1001,18 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
999 send_break(info, arg ? arg*(100) : 250); 1001 send_break(info, arg ? arg*(100) : 250);
1000 return 0; 1002 return 0;
1001 case TIOCGSERIAL: 1003 case TIOCGSERIAL:
1002 if (access_ok(VERIFY_WRITE, (void *) arg, 1004 return get_serial_info(info,
1003 sizeof(struct serial_struct))) 1005 (struct serial_struct *) arg);
1004 return get_serial_info(info,
1005 (struct serial_struct *) arg);
1006 return -EFAULT;
1007 case TIOCSSERIAL: 1006 case TIOCSSERIAL:
1008 return set_serial_info(info, 1007 return set_serial_info(info,
1009 (struct serial_struct *) arg); 1008 (struct serial_struct *) arg);
1010 case TIOCSERGETLSR: /* Get line status register */ 1009 case TIOCSERGETLSR: /* Get line status register */
1011 if (access_ok(VERIFY_WRITE, (void *) arg, 1010 return get_lsr_info(info, (unsigned int *) arg);
1012 sizeof(unsigned int)))
1013 return get_lsr_info(info, (unsigned int *) arg);
1014 return -EFAULT;
1015 case TIOCSERGSTRUCT: 1011 case TIOCSERGSTRUCT:
1016 if (!access_ok(VERIFY_WRITE, (void *) arg, 1012 if (copy_to_user((struct m68k_serial *) arg,
1017 sizeof(struct m68k_serial))) 1013 info, sizeof(struct m68k_serial)))
1018 return -EFAULT; 1014 return -EFAULT;
1019 copy_to_user((struct m68k_serial *) arg,
1020 info, sizeof(struct m68k_serial));
1021 return 0; 1015 return 0;
1022
1023 default: 1016 default:
1024 return -ENOIOCTLCMD; 1017 return -ENOIOCTLCMD;
1025 } 1018 }
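
The 68328serial hunks adopt the standard user-copy error convention: copy_to_user() and copy_from_user() return the number of bytes left uncopied, so any nonzero result becomes -EFAULT, and put_user() already returns 0 or -EFAULT directly. The separate access_ok() pre-checks go away because the copy helpers validate the range themselves. A kernel-style sketch of that shape (struct demo_state and the helper names are illustrative):

#include <linux/uaccess.h>
#include <linux/serial.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_state { int custom_divisor; };

static int demo_get_info(struct demo_state *s,
                         struct serial_struct __user *retinfo)
{
        struct serial_struct tmp;

        memset(&tmp, 0, sizeof(tmp));
        tmp.custom_divisor = s->custom_divisor;

        /* copy_to_user() returns bytes NOT copied; nonzero => -EFAULT */
        if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
                return -EFAULT;
        return 0;
}

static int demo_get_lsr(unsigned int status, unsigned int __user *value)
{
        /* put_user() already returns 0 or -EFAULT; pass it straight up */
        return put_user(status, value);
}
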
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c
index b745792ec25a..eaafb98debed 100644
--- a/drivers/serial/8250_early.c
+++ b/drivers/serial/8250_early.c
@@ -203,13 +203,13 @@ static int __init parse_options(struct early_serial8250_device *device,
203 203
204 if (mmio || mmio32) 204 if (mmio || mmio32)
205 printk(KERN_INFO 205 printk(KERN_INFO
206 "Early serial console at MMIO%s 0x%llu (options '%s')\n", 206 "Early serial console at MMIO%s 0x%llx (options '%s')\n",
207 mmio32 ? "32" : "", 207 mmio32 ? "32" : "",
208 (unsigned long long)port->mapbase, 208 (unsigned long long)port->mapbase,
209 device->options); 209 device->options);
210 else 210 else
211 printk(KERN_INFO 211 printk(KERN_INFO
212 "Early serial console at I/O port 0x%lu (options '%s')\n", 212 "Early serial console at I/O port 0x%lx (options '%s')\n",
213 port->iobase, 213 port->iobase,
214 device->options); 214 device->options);
215 215
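
The 8250_early fix corrects the conversion specifier, not the value: %llu and %lu print decimal, while the literal "0x" prefix in the message promises hex, so a mapbase of 0xfe000000 would have printed as "0x4261412864". A quick userspace illustration:

#include <stdio.h>

int main(void)
{
        unsigned long long mapbase = 0xfe000000ULL;

        printf("wrong: 0x%llu\n", mapbase); /* 0x4261412864, misleading */
        printf("right: 0x%llx\n", mapbase); /* 0xfe000000 */
        return 0;
}
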
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index e57fb3d228e2..5318dd3774ae 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
121 unsigned int sclk = get_sclk(); 121 unsigned int sclk = get_sclk();
122 122
123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */ 123 /* Set TCR1 and TCR2, TFSR is not enabled for uart */
124 SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); 124 SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
125 SPORT_PUT_TCR2(up, size + 1); 125 SPORT_PUT_TCR2(up, size + 1);
126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); 126 pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
127 127
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 659a695bdad6..2af8fd113123 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -14,11 +14,10 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/serial_core.h> 15#include <linux/serial_core.h>
16#include <linux/serial_8250.h> 16#include <linux/serial_8250.h>
17#include <linux/of_address.h>
17#include <linux/of_platform.h> 18#include <linux/of_platform.h>
18#include <linux/nwpserial.h> 19#include <linux/nwpserial.h>
19 20
20#include <asm/prom.h>
21
22struct of_serial_info { 21struct of_serial_info {
23 int type; 22 int type;
24 int line; 23 int line;
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 7e5e5efea4e2..cff9a306660f 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -492,7 +492,7 @@ sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
492 sysrq_requested = 0; 492 sysrq_requested = 0;
493 if (ch && time_before(jiffies, sysrq_timeout)) { 493 if (ch && time_before(jiffies, sysrq_timeout)) {
494 spin_unlock_irqrestore(&port->sc_port.lock, flags); 494 spin_unlock_irqrestore(&port->sc_port.lock, flags);
495 handle_sysrq(ch, NULL); 495 handle_sysrq(ch);
496 spin_lock_irqsave(&port->sc_port.lock, flags); 496 spin_lock_irqsave(&port->sc_port.lock, flags);
497 /* ignore actual sysrq command char */ 497 /* ignore actual sysrq command char */
498 continue; 498 continue;
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index 544f2e25d0e5..6381a0282ee7 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors);
55int sunserial_console_match(struct console *con, struct device_node *dp, 55int sunserial_console_match(struct console *con, struct device_node *dp,
56 struct uart_driver *drv, int line, bool ignore_line) 56 struct uart_driver *drv, int line, bool ignore_line)
57{ 57{
58 if (!con || of_console_device != dp) 58 if (!con)
59 return 0;
60
61 drv->cons = con;
62
63 if (of_console_device != dp)
59 return 0; 64 return 0;
60 65
61 if (!ignore_line) { 66 if (!ignore_line) {
@@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
69 return 0; 74 return 0;
70 } 75 }
71 76
72 con->index = line; 77 if (!console_set_on_cmdline) {
73 drv->cons = con; 78 con->index = line;
74
75 if (!console_set_on_cmdline)
76 add_preferred_console(con->name, line, NULL); 79 add_preferred_console(con->name, line, NULL);
77 80 }
78 return 1; 81 return 1;
79} 82}
80EXPORT_SYMBOL(sunserial_console_match); 83EXPORT_SYMBOL(sunserial_console_match);
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
index 59be3efe0636..052b3c7fa6a0 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/coldfire_qspi.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/sched.h>
27#include <linux/workqueue.h> 28#include <linux/workqueue.h>
28#include <linux/delay.h> 29#include <linux/delay.h>
29#include <linux/io.h> 30#include <linux/io.h>
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4a7a7a7f11b6..335311a98fdc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
113 113
114source "drivers/staging/memrar/Kconfig" 114source "drivers/staging/memrar/Kconfig"
115 115
116source "drivers/staging/sep/Kconfig"
117
118source "drivers/staging/iio/Kconfig" 116source "drivers/staging/iio/Kconfig"
119 117
120source "drivers/staging/zram/Kconfig" 118source "drivers/staging/zram/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ca5c03eb3ce3..e3f1e1b6095e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/
38obj-$(CONFIG_HYPERV) += hv/ 38obj-$(CONFIG_HYPERV) += hv/
39obj-$(CONFIG_VME_BUS) += vme/ 39obj-$(CONFIG_VME_BUS) += vme/
40obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/ 40obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
41obj-$(CONFIG_DX_SEP) += sep/
42obj-$(CONFIG_IIO) += iio/ 41obj-$(CONFIG_IIO) += iio/
43obj-$(CONFIG_ZRAM) += zram/ 42obj-$(CONFIG_ZRAM) += zram/
44obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/ 43obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index b4a8d5eb64fa..05ca15a6c9f8 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
267 if (atomic_read(&bat_priv->log_level) == log_level_tmp) 267 if (atomic_read(&bat_priv->log_level) == log_level_tmp)
268 return count; 268 return count;
269 269
270 bat_info(net_dev, "Changing log level from: %i to: %li\n",
271 atomic_read(&bat_priv->log_level),
272 log_level_tmp);
273
270 atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp); 274 atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
271 return count; 275 return count;
272} 276}
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 92c216a56885..baa8b05b9e8d 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
129 129
130static void update_mac_addresses(struct batman_if *batman_if) 130static void update_mac_addresses(struct batman_if *batman_if)
131{ 131{
132 if (!batman_if || !batman_if->packet_buff)
133 return;
134
132 addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); 135 addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
133 136
134 memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, 137 memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
194 if (batman_if->if_status != IF_INACTIVE) 197 if (batman_if->if_status != IF_INACTIVE)
195 return; 198 return;
196 199
197 dev_hold(batman_if->net_dev);
198
199 update_mac_addresses(batman_if); 200 update_mac_addresses(batman_if);
200 batman_if->if_status = IF_TO_BE_ACTIVATED; 201 batman_if->if_status = IF_TO_BE_ACTIVATED;
201 202
@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
222 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 223 (batman_if->if_status != IF_TO_BE_ACTIVATED))
223 return; 224 return;
224 225
225 dev_put(batman_if->net_dev);
226
227 batman_if->if_status = IF_INACTIVE; 226 batman_if->if_status = IF_INACTIVE;
228 227
229 bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev); 228 bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
@@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
318 if (ret != 1) 317 if (ret != 1)
319 goto out; 318 goto out;
320 319
320 dev_hold(net_dev);
321
321 batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); 322 batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
322 if (!batman_if) { 323 if (!batman_if) {
323 pr_err("Can't add interface (%s): out of memory\n", 324 pr_err("Can't add interface (%s): out of memory\n",
324 net_dev->name); 325 net_dev->name);
325 goto out; 326 goto release_dev;
326 } 327 }
327 328
328 batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); 329 batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
@@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
336 batman_if->if_num = -1; 337 batman_if->if_num = -1;
337 batman_if->net_dev = net_dev; 338 batman_if->net_dev = net_dev;
338 batman_if->if_status = IF_NOT_IN_USE; 339 batman_if->if_status = IF_NOT_IN_USE;
340 batman_if->packet_buff = NULL;
339 INIT_LIST_HEAD(&batman_if->list); 341 INIT_LIST_HEAD(&batman_if->list);
340 342
341 check_known_mac_addr(batman_if->net_dev->dev_addr); 343 check_known_mac_addr(batman_if->net_dev->dev_addr);
@@ -346,6 +348,8 @@ free_dev:
346 kfree(batman_if->dev); 348 kfree(batman_if->dev);
347free_if: 349free_if:
348 kfree(batman_if); 350 kfree(batman_if);
351release_dev:
352 dev_put(net_dev);
349out: 353out:
350 return NULL; 354 return NULL;
351} 355}
@@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
374 batman_if->if_status = IF_TO_BE_REMOVED; 378 batman_if->if_status = IF_TO_BE_REMOVED;
375 list_del_rcu(&batman_if->list); 379 list_del_rcu(&batman_if->list);
376 sysfs_del_hardif(&batman_if->hardif_obj); 380 sysfs_del_hardif(&batman_if->hardif_obj);
381 dev_put(batman_if->net_dev);
377 call_rcu(&batman_if->rcu, hardif_free_interface); 382 call_rcu(&batman_if->rcu, hardif_free_interface);
378} 383}
379 384
@@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this,
393 /* FIXME: each batman_if will be attached to a softif */ 398 /* FIXME: each batman_if will be attached to a softif */
394 struct bat_priv *bat_priv = netdev_priv(soft_device); 399 struct bat_priv *bat_priv = netdev_priv(soft_device);
395 400
396 if (!batman_if) 401 if (!batman_if && event == NETDEV_REGISTER)
397 batman_if = hardif_add_interface(net_dev); 402 batman_if = hardif_add_interface(net_dev);
398 403
399 if (!batman_if) 404 if (!batman_if)
400 goto out; 405 goto out;
401 406
402 switch (event) { 407 switch (event) {
403 case NETDEV_REGISTER:
404 break;
405 case NETDEV_UP: 408 case NETDEV_UP:
406 hardif_activate_interface(soft_device, bat_priv, batman_if); 409 hardif_activate_interface(soft_device, bat_priv, batman_if);
407 break; 410 break;
@@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
442 struct bat_priv *bat_priv = netdev_priv(soft_device); 445 struct bat_priv *bat_priv = netdev_priv(soft_device);
443 struct batman_packet *batman_packet; 446 struct batman_packet *batman_packet;
444 struct batman_if *batman_if; 447 struct batman_if *batman_if;
445 struct net_device_stats *stats;
446 struct rtnl_link_stats64 temp;
447 int ret; 448 int ret;
448 449
449 skb = skb_share_check(skb, GFP_ATOMIC); 450 skb = skb_share_check(skb, GFP_ATOMIC);
@@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
479 if (batman_if->if_status != IF_ACTIVE) 480 if (batman_if->if_status != IF_ACTIVE)
480 goto err_free; 481 goto err_free;
481 482
482 stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
483 if (stats) {
484 stats->rx_packets++;
485 stats->rx_bytes += skb->len;
486 }
487
488 batman_packet = (struct batman_packet *)skb->data; 483 batman_packet = (struct batman_packet *)skb->data;
489 484
490 if (batman_packet->version != COMPAT_VERSION) { 485 if (batman_packet->version != COMPAT_VERSION) {
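
The hard-interface hunks tie the dev_hold()/dev_put() pair to the lifetime of the batman_if object: the reference is taken once when the object is created and dropped when it is removed (or on the allocation-failure path), instead of on activate/deactivate, where an interface torn down while still inactive would never release its reference. The shape of that pattern, as a kernel-style sketch with illustrative names:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct demo_if { struct net_device *net_dev; };

static struct demo_if *demo_add(struct net_device *net_dev)
{
        struct demo_if *d;

        dev_hold(net_dev);            /* ref for demo_if's whole lifetime */

        d = kmalloc(sizeof(*d), GFP_ATOMIC);
        if (!d)
                goto release_dev;

        d->net_dev = net_dev;
        return d;

release_dev:
        dev_put(net_dev);             /* undo on every failure path */
        return NULL;
}

static void demo_remove(struct demo_if *d)
{
        dev_put(d->net_dev);          /* paired with demo_add() */
        kfree(d);
}
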
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
index fc3d32c12729..3ae7dd2d2d4d 100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
67 INIT_LIST_HEAD(&socket_client->queue_list); 67 INIT_LIST_HEAD(&socket_client->queue_list);
68 socket_client->queue_len = 0; 68 socket_client->queue_len = 0;
69 socket_client->index = i; 69 socket_client->index = i;
70 socket_client->bat_priv = inode->i_private;
70 spin_lock_init(&socket_client->lock); 71 spin_lock_init(&socket_client->lock);
71 init_waitqueue_head(&socket_client->queue_wait); 72 init_waitqueue_head(&socket_client->queue_wait);
72 73
@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
151static ssize_t bat_socket_write(struct file *file, const char __user *buff, 152static ssize_t bat_socket_write(struct file *file, const char __user *buff,
152 size_t len, loff_t *off) 153 size_t len, loff_t *off)
153{ 154{
154 /* FIXME: each orig_node->batman_if will be attached to a softif */
155 struct bat_priv *bat_priv = netdev_priv(soft_device);
156 struct socket_client *socket_client = file->private_data; 155 struct socket_client *socket_client = file->private_data;
156 struct bat_priv *bat_priv = socket_client->bat_priv;
157 struct icmp_packet_rr icmp_packet; 157 struct icmp_packet_rr icmp_packet;
158 struct orig_node *orig_node; 158 struct orig_node *orig_node;
159 struct batman_if *batman_if; 159 struct batman_if *batman_if;
@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
168 return -EINVAL; 168 return -EINVAL;
169 } 169 }
170 170
171 if (!bat_priv->primary_if)
172 return -EFAULT;
173
171 if (len >= sizeof(struct icmp_packet_rr)) 174 if (len >= sizeof(struct icmp_packet_rr))
172 packet_len = sizeof(struct icmp_packet_rr); 175 packet_len = sizeof(struct icmp_packet_rr);
173 176
@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
223 if (batman_if->if_status != IF_ACTIVE) 226 if (batman_if->if_status != IF_ACTIVE)
224 goto dst_unreach; 227 goto dst_unreach;
225 228
226 memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN); 229 memcpy(icmp_packet.orig,
230 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
227 231
228 if (packet_len == sizeof(struct icmp_packet_rr)) 232 if (packet_len == sizeof(struct icmp_packet_rr))
229 memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN); 233 memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
271 goto err; 275 goto err;
272 276
273 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR, 277 d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
274 bat_priv->debug_dir, NULL, &fops); 278 bat_priv->debug_dir, bat_priv, &fops);
275 if (d) 279 if (d)
276 goto err; 280 goto err;
277 281
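
The icmp_socket hunks stop dereferencing the global soft_device: bat_priv travels through debugfs_create_file()'s data argument, comes back in open() as inode->i_private, and is cached on the per-client state for later reads and writes. A sketch of that debugfs plumbing with hypothetical names:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct demo_priv;
struct demo_client { struct demo_priv *priv; };

static int demo_open(struct inode *inode, struct file *file)
{
        struct demo_client *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return -ENOMEM;
        c->priv = inode->i_private;   /* set by debugfs_create_file() */
        file->private_data = c;
        return 0;
}

static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = demo_open,
        /* .release freeing the client is omitted for brevity */
};

/* 'priv' travels through the void *data argument into i_private */
static void demo_register(struct dentry *dir, struct demo_priv *priv)
{
        debugfs_create_file("demo", 0600, dir, priv, &demo_fops);
}
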
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index 2686019fe4e1..ef7c20ae7979 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
250int is_my_mac(uint8_t *addr) 250int is_my_mac(uint8_t *addr)
251{ 251{
252 struct batman_if *batman_if; 252 struct batman_if *batman_if;
253
253 rcu_read_lock(); 254 rcu_read_lock();
254 list_for_each_entry_rcu(batman_if, &if_list, list) { 255 list_for_each_entry_rcu(batman_if, &if_list, list) {
255 if ((batman_if->net_dev) && 256 if (batman_if->if_status != IF_ACTIVE)
256 (compare_orig(batman_if->net_dev->dev_addr, addr))) { 257 continue;
258
259 if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
257 rcu_read_unlock(); 260 rcu_read_unlock();
258 return 1; 261 return 1;
259 } 262 }
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index 28bb627ffa13..de5a8c1a8104 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
391int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) 391int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
392{ 392{
393 struct orig_node *orig_node; 393 struct orig_node *orig_node;
394 unsigned long flags;
394 HASHIT(hashit); 395 HASHIT(hashit);
395 396
396 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 397 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
397 * if_num */ 398 * if_num */
398 spin_lock(&orig_hash_lock); 399 spin_lock_irqsave(&orig_hash_lock, flags);
399 400
400 while (hash_iterate(orig_hash, &hashit)) { 401 while (hash_iterate(orig_hash, &hashit)) {
401 orig_node = hashit.bucket->data; 402 orig_node = hashit.bucket->data;
@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
404 goto err; 405 goto err;
405 } 406 }
406 407
407 spin_unlock(&orig_hash_lock); 408 spin_unlock_irqrestore(&orig_hash_lock, flags);
408 return 0; 409 return 0;
409 410
410err: 411err:
411 spin_unlock(&orig_hash_lock); 412 spin_unlock_irqrestore(&orig_hash_lock, flags);
412 return -ENOMEM; 413 return -ENOMEM;
413} 414}
414 415
@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
468{ 469{
469 struct batman_if *batman_if_tmp; 470 struct batman_if *batman_if_tmp;
470 struct orig_node *orig_node; 471 struct orig_node *orig_node;
472 unsigned long flags;
471 HASHIT(hashit); 473 HASHIT(hashit);
472 int ret; 474 int ret;
473 475
474 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 476 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
475 * if_num */ 477 * if_num */
476 spin_lock(&orig_hash_lock); 478 spin_lock_irqsave(&orig_hash_lock, flags);
477 479
478 while (hash_iterate(orig_hash, &hashit)) { 480 while (hash_iterate(orig_hash, &hashit)) {
479 orig_node = hashit.bucket->data; 481 orig_node = hashit.bucket->data;
@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
500 rcu_read_unlock(); 502 rcu_read_unlock();
501 503
502 batman_if->if_num = -1; 504 batman_if->if_num = -1;
503 spin_unlock(&orig_hash_lock); 505 spin_unlock_irqrestore(&orig_hash_lock, flags);
504 return 0; 506 return 0;
505 507
506err: 508err:
507 spin_unlock(&orig_hash_lock); 509 spin_unlock_irqrestore(&orig_hash_lock, flags);
508 return -ENOMEM; 510 return -ENOMEM;
509} 511}
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 066cc9149bf1..032195e6de94 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
783 783
784static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len) 784static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
785{ 785{
786 /* FIXME: each batman_if will be attached to a softif */
787 struct bat_priv *bat_priv = netdev_priv(soft_device);
786 struct orig_node *orig_node; 788 struct orig_node *orig_node;
787 struct icmp_packet_rr *icmp_packet; 789 struct icmp_packet_rr *icmp_packet;
788 struct ethhdr *ethhdr; 790 struct ethhdr *ethhdr;
@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
801 return NET_RX_DROP; 803 return NET_RX_DROP;
802 } 804 }
803 805
806 if (!bat_priv->primary_if)
807 return NET_RX_DROP;
808
804 /* answer echo request (ping) */ 809 /* answer echo request (ping) */
805 /* get routing information */ 810 /* get routing information */
806 spin_lock_irqsave(&orig_hash_lock, flags); 811 spin_lock_irqsave(&orig_hash_lock, flags);
@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
830 } 835 }
831 836
832 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 837 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
833 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 838 memcpy(icmp_packet->orig,
839 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
834 icmp_packet->msg_type = ECHO_REPLY; 840 icmp_packet->msg_type = ECHO_REPLY;
835 icmp_packet->ttl = TTL; 841 icmp_packet->ttl = TTL;
836 842
@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
845 851
846static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len) 852static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
847{ 853{
854 /* FIXME: each batman_if will be attached to a softif */
855 struct bat_priv *bat_priv = netdev_priv(soft_device);
848 struct orig_node *orig_node; 856 struct orig_node *orig_node;
849 struct icmp_packet *icmp_packet; 857 struct icmp_packet *icmp_packet;
850 struct ethhdr *ethhdr; 858 struct ethhdr *ethhdr;
@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
865 return NET_RX_DROP; 873 return NET_RX_DROP;
866 } 874 }
867 875
876 if (!bat_priv->primary_if)
877 return NET_RX_DROP;
878
868 /* get routing information */ 879 /* get routing information */
869 spin_lock_irqsave(&orig_hash_lock, flags); 880 spin_lock_irqsave(&orig_hash_lock, flags);
870 orig_node = ((struct orig_node *) 881 orig_node = ((struct orig_node *)
@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
892 } 903 }
893 904
894 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 905 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
895 memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN); 906 memcpy(icmp_packet->orig,
907 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
896 icmp_packet->msg_type = TTL_EXCEEDED; 908 icmp_packet->msg_type = TTL_EXCEEDED;
897 icmp_packet->ttl = TTL; 909 icmp_packet->ttl = TTL;
898 910
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 21d0717afb09..9aa9d369c752 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -126,6 +126,7 @@ struct socket_client {
126 unsigned char index; 126 unsigned char index;
127 spinlock_t lock; 127 spinlock_t lock;
128 wait_queue_head_t queue_wait; 128 wait_queue_head_t queue_wait;
129 struct bat_priv *bat_priv;
129}; 130};
130 131
131struct socket_packet { 132struct socket_packet {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index c6aa52f8dcee..48d9fb1227df 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -222,7 +222,6 @@ static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 222 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
223 p_dev->resource[0]->flags |= 223 p_dev->resource[0]->flags |=
224 pcmcia_io_cfg_data_width(io->flags); 224 pcmcia_io_cfg_data_width(io->flags);
225 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
226 p_dev->resource[0]->start = io->win[0].base; 225 p_dev->resource[0]->start = io->win[0].base;
227 p_dev->resource[0]->end = io->win[0].len; 226 p_dev->resource[0]->end = io->win[0].len;
228 if (io->nwin > 1) { 227 if (io->nwin > 1) {
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 56e11575c977..64a01147ecae 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -327,6 +327,9 @@ static const struct net_device_ops device_ops = {
327 .ndo_stop = netvsc_close, 327 .ndo_stop = netvsc_close,
328 .ndo_start_xmit = netvsc_start_xmit, 328 .ndo_start_xmit = netvsc_start_xmit,
329 .ndo_set_multicast_list = netvsc_set_multicast_list, 329 .ndo_set_multicast_list = netvsc_set_multicast_list,
330 .ndo_change_mtu = eth_change_mtu,
331 .ndo_validate_addr = eth_validate_addr,
332 .ndo_set_mac_address = eth_mac_addr,
330}; 333};
331 334
332static int netvsc_probe(struct device *device) 335static int netvsc_probe(struct device *device)
diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c
index 17bc7626f70a..d78c569ac94a 100644
--- a/drivers/staging/hv/ring_buffer.c
+++ b/drivers/staging/hv/ring_buffer.c
@@ -193,8 +193,7 @@ Description:
193static inline u64 193static inline u64
194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo) 194GetRingBufferIndices(struct hv_ring_buffer_info *RingInfo)
195{ 195{
196 return ((u64)RingInfo->RingBuffer->WriteIndex << 32) 196 return (u64)RingInfo->RingBuffer->WriteIndex << 32;
197 || RingInfo->RingBuffer->ReadIndex;
198} 197}
199 198
200 199
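
The ring_buffer fix removes a logical-vs-bitwise OR bug: ((u64)write << 32) || read collapses to 0 or 1, so the old function returned 1 for any nonzero index. The replacement keeps the write index in the upper 32 bits (the read half is simply dropped here); had both halves been wanted, bitwise | would pack them, as this userspace demo shows:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t write = 0x10, read = 0x4;

        /* Logical OR: collapses to 0 or 1, the bug being removed */
        uint64_t buggy = ((uint64_t)write << 32) || read;

        /* Bitwise OR: packs both indices into one 64-bit word */
        uint64_t packed = ((uint64_t)write << 32) | read;

        printf("buggy  = 0x%llx\n", (unsigned long long)buggy);  /* 0x1 */
        printf("packed = 0x%llx\n", (unsigned long long)packed); /* 0x1000000004 */
        return 0;
}
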
diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h
index 0063bde9a4b2..8505a1c5f9ee 100644
--- a/drivers/staging/hv/storvsc_api.h
+++ b/drivers/staging/hv/storvsc_api.h
@@ -28,10 +28,10 @@
28#include "vmbus_api.h" 28#include "vmbus_api.h"
29 29
30/* Defines */ 30/* Defines */
31#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) 31#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) 32#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
33 33
34#define STORVSC_MAX_IO_REQUESTS 64 34#define STORVSC_MAX_IO_REQUESTS 128
35 35
36/* 36/*
37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In 37 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index 075b61bd492f..62882a437aa4 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -495,7 +495,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
495 495
496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 496 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
497 497
498 if (j == 0) 498 if (bounce_addr == 0)
499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 499 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
500 500
501 while (srclen) { 501 while (srclen) {
@@ -556,7 +556,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
556 destlen = orig_sgl[i].length; 556 destlen = orig_sgl[i].length;
557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ 557 /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */
558 558
559 if (j == 0) 559 if (bounce_addr == 0)
560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); 560 bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
561 561
562 while (destlen) { 562 while (destlen) {
@@ -615,6 +615,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
615 unsigned int request_size = 0; 615 unsigned int request_size = 0;
616 int i; 616 int i;
617 struct scatterlist *sgl; 617 struct scatterlist *sgl;
618 unsigned int sg_count = 0;
618 619
619 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d " 620 DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
620 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction, 621 "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
@@ -697,6 +698,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
697 request->DataBuffer.Length = scsi_bufflen(scmnd); 698 request->DataBuffer.Length = scsi_bufflen(scmnd);
698 if (scsi_sg_count(scmnd)) { 699 if (scsi_sg_count(scmnd)) {
699 sgl = (struct scatterlist *)scsi_sglist(scmnd); 700 sgl = (struct scatterlist *)scsi_sglist(scmnd);
701 sg_count = scsi_sg_count(scmnd);
700 702
701 /* check if we need to bounce the sgl */ 703 /* check if we need to bounce the sgl */
702 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { 704 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -731,15 +733,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
731 scsi_sg_count(scmnd)); 733 scsi_sg_count(scmnd));
732 734
733 sgl = cmd_request->bounce_sgl; 735 sgl = cmd_request->bounce_sgl;
736 sg_count = cmd_request->bounce_sgl_count;
734 } 737 }
735 738
736 request->DataBuffer.Offset = sgl[0].offset; 739 request->DataBuffer.Offset = sgl[0].offset;
737 740
738 for (i = 0; i < scsi_sg_count(scmnd); i++) { 741 for (i = 0; i < sg_count; i++) {
739 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", 742 DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n",
740 i, sgl[i].length, sgl[i].offset); 743 i, sgl[i].length, sgl[i].offset);
741 request->DataBuffer.PfnArray[i] = 744 request->DataBuffer.PfnArray[i] =
742 page_to_pfn(sg_page((&sgl[i]))); 745 page_to_pfn(sg_page((&sgl[i])));
743 } 746 }
744 } else if (scsi_sglist(scmnd)) { 747 } else if (scsi_sglist(scmnd)) {
745 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ 748 /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
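Review note: the PFN loop was previously bounded by scsi_sg_count(scmnd) even after sgl had been swapped for the bounce list, whose entry count can differ; the new sg_count variable tracks whichever list is actually in use. Condensed from the hunks above, with bouncing standing in for the do_bounce_buffer() test:

    sg_count = scsi_sg_count(scmnd);                  /* original list */
    if (bouncing) {
            sgl = cmd_request->bounce_sgl;
            sg_count = cmd_request->bounce_sgl_count; /* bounce list */
    }
    for (i = 0; i < sg_count; i++)
            request->DataBuffer.PfnArray[i] = page_to_pfn(sg_page(&sgl[i]));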
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b35891..9493128e5fd2 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,6 +1,6 @@
1config OCTEON_ETHERNET 1config OCTEON_ETHERNET
2 tristate "Cavium Networks Octeon Ethernet support" 2 tristate "Cavium Networks Octeon Ethernet support"
3 depends on CPU_CAVIUM_OCTEON 3 depends on CPU_CAVIUM_OCTEON && NETDEVICES
4 select PHYLIB 4 select PHYLIB
5 select MDIO_OCTEON 5 select MDIO_OCTEON
6 help 6 help
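Review note: the added NETDEVICES dependency keeps OCTEON_ETHERNET from being offered in configurations that build no netdev core for it to register against.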
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
index cdc4dd50d638..8ec83d2dffb7 100644
--- a/drivers/staging/pohmelfs/path_entry.c
+++ b/drivers/staging/pohmelfs/path_entry.c
@@ -44,9 +44,9 @@ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int le
44 return -ENOENT; 44 return -ENOENT;
45 } 45 }
46 46
47 read_lock(&current->fs->lock); 47 spin_lock(&current->fs->lock);
48 path.mnt = mntget(current->fs->root.mnt); 48 path.mnt = mntget(current->fs->root.mnt);
49 read_unlock(&current->fs->lock); 49 spin_unlock(&current->fs->lock);
50 50
51 path.dentry = d; 51 path.dentry = d;
52 52
@@ -91,9 +91,9 @@ int pohmelfs_path_length(struct pohmelfs_inode *pi)
91 return -ENOENT; 91 return -ENOENT;
92 } 92 }
93 93
94 read_lock(&current->fs->lock); 94 spin_lock(&current->fs->lock);
95 root = dget(current->fs->root.dentry); 95 root = dget(current->fs->root.dentry);
96 read_unlock(&current->fs->lock); 96 spin_unlock(&current->fs->lock);
97 97
98 spin_lock(&dcache_lock); 98 spin_lock(&dcache_lock);
99 99
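Review note: current->fs->lock changed type from rwlock_t to spinlock_t in the core VFS, so the read_lock()/read_unlock() pairs here become spin_lock()/spin_unlock(). The pattern itself is unchanged: pin the object while the lock is held, then drop the lock before anything that may block, e.g.:

    spin_lock(&current->fs->lock);
    path.mnt = mntget(current->fs->root.mnt); /* take a reference */
    spin_unlock(&current->fs->lock);
    /* ... work with path.mnt, lock-free ... */
    mntput(path.mnt);                         /* drop the reference */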
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index a0fe31de0a6d..ebf9074a9083 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = {
44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ 44 {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */
45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ 45 {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */
46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ 46 {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */
47 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ 48 {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */
48 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ 49 {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */
49 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ 50 {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */
@@ -95,7 +96,8 @@ struct usb_device_id rtusb_usb_id[] = {
95 {USB_DEVICE(0x050d, 0x815c)}, 96 {USB_DEVICE(0x050d, 0x815c)},
96 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ 97 {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */
97 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ 98 {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */
98 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ 99 {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */
100 {USB_DEVICE(0x1690, 0x0740)}, /* Askey */
99 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ 101 {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */
100 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ 102 {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */
101 {USB_DEVICE(0x7392, 0x7718)}, 103 {USB_DEVICE(0x7392, 0x7718)},
@@ -105,21 +107,34 @@ struct usb_device_id rtusb_usb_id[] = {
105 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ 107 {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */
106 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ 108 {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */
107 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ 109 {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */
110 {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */
108#endif /* RT2870 // */ 111#endif /* RT2870 // */
109#ifdef RT3070 112#ifdef RT3070
110 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ 113 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */
111 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ 114 {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */
112 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ 115 {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */
113 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ 116 {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */
117 {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */
118 {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */
119 {USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */
120 {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */
114 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ 121 {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */
115 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ 122 {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */
123 {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */
124 {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */
116 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ 125 {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */
117 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ 126 {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */
118 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ 127 {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */
128 {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */
129 {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */
119 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ 130 {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */
120 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ 131 {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */
121 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ 132 {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */
133 {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */
134 {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */
135 {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */
122 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ 136 {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */
 137 {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070 */
123 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ 138 {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */
124 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ 139 {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */
125 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ 140 {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */
@@ -132,14 +147,36 @@ struct usb_device_id rtusb_usb_id[] = {
132 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ 147 {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */
133 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ 148 {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */
134 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ 149 {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */
150 {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */
151 {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */
135 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ 152 {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */
136 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ 153 {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */
137 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ 154 {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */
138 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ 155 {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */
139 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ 156 {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */
157 {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */
158 {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */
140 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ 159 {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */
160 {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */
161 {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */
 162 {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070 */
 163 {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070 */
 164 {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070 */
 165 {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070 */
166 {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */
167 {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */
168 {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */
169 {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */
170 {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */
171 {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */
172 {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */
173 {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */
174 {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */
175 {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */
176 {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */
177 {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */
178 {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */
141#endif /* RT3070 // */ 179#endif /* RT3070 // */
142 {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */
143 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ 180 {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */
144 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ 181 {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
145 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ 182 {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
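Review note: the additions above extend rtusb_usb_id[], the table the USB core matches at probe time; note that the Sitecom 0x0DF6:0x003F entry also moves from the unconditional tail of the table (old line 142) into the common block near the top. Each USB_DEVICE() initializer expands to a plain vendor/product match, roughly:

    /* USB_DEVICE(0x0DF6, 0x003F) expands to approximately: */
    { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
      .idVendor = 0x0DF6, .idProduct = 0x003F },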
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c7f2bd..000000000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
1config DX_SEP
2 tristate "Discretix SEP driver"
3# depends on MRST
4 depends on RAR_REGISTER && PCI
5 default y
6 help
7 Discretix SEP driver
8
9 If unsure say M. The compiled module will be
10 called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f919414..000000000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
1obj-$(CONFIG_DX_SEP) := sep_driver.o
2
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931dab64..000000000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
1Todo's so far (from Alan Cox)
2- Fix firmware loading
3- Get firmware into firmware git tree
4- Review and tidy each algorithm function
5- Check whether it can be plugged into any of the kernel crypto API
6 interfaces
7- Do something about the magic shared memory interface and replace it
8 with something saner (in Linux terms)
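Review note: with the Kconfig, Makefile and TODO gone, the remainder of the patch deletes the SEP (Security Processor) driver sources themselves, removing the driver from the staging tree in one piece.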
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524bb64d..000000000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
1#ifndef __SEP_DEV_H__
2#define __SEP_DEV_H__
3
4/*
5 *
6 * sep_dev.h - Security Processor Device Structures
7 *
8 * Copyright(c) 2009 Intel Corporation. All rights reserved.
9 * Copyright(c) 2009 Discretix. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 *
25 * CONTACTS:
26 *
27 * Alan Cox alan@linux.intel.com
28 *
29 */
30
31struct sep_device {
32 /* pointer to pci dev */
33 struct pci_dev *pdev;
34
35 unsigned long in_use;
36
37 /* address of the shared memory allocated during init for SEP driver
38 (coherent alloc) */
39 void *shared_addr;
40 /* the physical address of the shared area */
41 dma_addr_t shared_bus;
42
43 /* restricted access region (coherent alloc) */
44 dma_addr_t rar_bus;
45 void *rar_addr;
46 /* firmware regions: cache is at rar_addr */
47 unsigned long cache_size;
48
49 /* follows the cache */
50 dma_addr_t resident_bus;
51 unsigned long resident_size;
52 void *resident_addr;
53
54 /* start address of the access to the SEP registers from driver */
55 void __iomem *reg_addr;
56 /* transaction counter that coordinates the transactions between SEP and HOST */
57 unsigned long send_ct;
58 /* counter for the messages from sep */
59 unsigned long reply_ct;
60 /* counter for the number of bytes allocated in the pool for the current
61 transaction */
62 unsigned long data_pool_bytes_allocated;
63
64 /* array of pointers to the pages that represent input data for the synchronic
65 DMA action */
66 struct page **in_page_array;
67
68 /* array of pointers to the pages that represent out data for the synchronic
69 DMA action */
70 struct page **out_page_array;
71
72 /* number of pages in the sep_in_page_array */
73 unsigned long in_num_pages;
74
75 /* number of pages in the sep_out_page_array */
76 unsigned long out_num_pages;
77
78 /* global data for every flow */
79 struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
80
81 /* pointer to the workqueue that handles the flow done interrupts */
82 struct workqueue_struct *flow_wq;
83
84};
85
86static struct sep_device *sep_dev;
87
88static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
89{
90 void __iomem *addr = dev->reg_addr + reg;
91 writel(value, addr);
92}
93
94static inline u32 sep_read_reg(struct sep_device *dev, int reg)
95{
96 void __iomem *addr = dev->reg_addr + reg;
97 return readl(addr);
98}
99
 100/* wait for SRAM write complete (indirect write) */
101static inline void sep_wait_sram_write(struct sep_device *dev)
102{
103 u32 reg_val;
104 do
105 reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
106 while (!(reg_val & 1));
107}
108
109
110#endif
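Review note: sep_write_reg()/sep_read_reg() are thin writel()/readl() wrappers over the device's register window, and sep_wait_sram_write() busy-waits on bit 0 of the data-ready register. A typical call site, mirroring the ROM loader in sep_driver.c below:

    sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1); /* reset SEP */
    while (!sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR))
            ; /* spin until the ROM reports a boot status */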
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde3467b1b..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
1/*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/fs.h>
35#include <linux/cdev.h>
36#include <linux/kdev_t.h>
37#include <linux/mutex.h>
38#include <linux/sched.h>
39#include <linux/mm.h>
40#include <linux/poll.h>
41#include <linux/wait.h>
42#include <linux/pci.h>
43#include <linux/firmware.h>
44#include <linux/slab.h>
45#include <asm/ioctl.h>
46#include <linux/ioport.h>
47#include <asm/io.h>
48#include <linux/interrupt.h>
49#include <linux/pagemap.h>
50#include <asm/cacheflush.h>
51#include "sep_driver_hw_defs.h"
52#include "sep_driver_config.h"
53#include "sep_driver_api.h"
54#include "sep_dev.h"
55
56#if SEP_DRIVER_ARM_DEBUG_MODE
57
58#define CRYS_SEP_ROM_length 0x4000
59#define CRYS_SEP_ROM_start_address 0x8000C000UL
60#define CRYS_SEP_ROM_start_address_offset 0xC000UL
61#define SEP_ROM_BANK_register 0x80008420UL
62#define SEP_ROM_BANK_register_offset 0x8420UL
63#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64
65/*
66 * THESE 2 definitions are specific to the board - must be
67 * defined during integration
68 */
69#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
70
71/* 2M size */
72
73static void sep_load_rom_code(struct sep_device *sep)
74{
75 /* Index variables */
76 unsigned long i, k, j;
77 u32 reg;
78 u32 error;
79 u32 warning;
80
81 /* Loading ROM from SEP_ROM_image.h file */
82 k = sizeof(CRYS_SEP_ROM);
83
84 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
85
86 edbg("SEP Driver: k is %lu\n", k);
87 edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
88 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
89
90 for (i = 0; i < 4; i++) {
91 /* write bank */
92 sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
93
94 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
95 sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
96
97 k = k - 4;
98
99 if (k == 0) {
100 j = CRYS_SEP_ROM_length;
101 i = 4;
102 }
103 }
104 }
105
106 /* reset the SEP */
107 sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
108
109 /* poll for SEP ROM boot finish */
110 do
111 reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
112 while (!reg);
113
114 edbg("SEP Driver: ROM polling ended\n");
115
116 switch (reg) {
117 case 0x1:
 118 /* fatal error - read error status from GPR0 */
119 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
120 edbg("SEP Driver: ROM polling case 1\n");
121 break;
122 case 0x4:
123 /* Cold boot ended successfully */
124 case 0x8:
125 /* Warmboot ended successfully */
126 case 0x10:
127 /* ColdWarm boot ended successfully */
128 error = 0;
129 case 0x2:
130 /* Boot First Phase ended */
131 warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
132 case 0x20:
133 edbg("SEP Driver: ROM polling case %d\n", reg);
134 break;
135 }
136
137}
138
139#else
140static void sep_load_rom_code(struct sep_device *sep) { }
141#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
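Review note: in the switch at the end of sep_load_rom_code(), the 0x4/0x8/0x10 success arms and the 0x2 first-phase arm carry no break, so a successful boot also falls through to read the GPR0 warning word and print the final message; whether that fallthrough is intentional is not obvious from the code, and error and warning are only ever consumed by the debug output.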
142
143
144
145/*----------------------------------------
146 DEFINES
147-----------------------------------------*/
148
149#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
150#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
151
152/*--------------------------------------------
153 GLOBAL variables
154--------------------------------------------*/
155
156/* debug messages level */
157static int debug;
158module_param(debug, int , 0);
159MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
160
161/* Keep this a single static object for now to keep the conversion easy */
162
163static struct sep_device sep_instance;
164static struct sep_device *sep_dev = &sep_instance;
165
166/*
167 mutex for the access to the internals of the sep driver
168*/
169static DEFINE_MUTEX(sep_mutex);
170
171
172/* wait queue head (event) of the driver */
173static DECLARE_WAIT_QUEUE_HEAD(sep_event);
174
175/**
176 * sep_load_firmware - copy firmware cache/resident
177 * @sep: device we are loading
178 *
179 * This functions copies the cache and resident from their source
180 * location into destination shared memory.
181 */
182
183static int sep_load_firmware(struct sep_device *sep)
184{
185 const struct firmware *fw;
186 char *cache_name = "sep/cache.image.bin";
187 char *res_name = "sep/resident.image.bin";
188 int error;
189
190 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
191 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192
193 /* load cache */
194 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
195 if (error) {
196 edbg("SEP Driver:cant request cache fw\n");
197 return error;
198 }
199 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200
201 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
202 sep->cache_size = fw->size;
203 release_firmware(fw);
204
205 sep->resident_bus = sep->rar_bus + sep->cache_size;
206 sep->resident_addr = sep->rar_addr + sep->cache_size;
207
208 /* load resident */
209 error = request_firmware(&fw, res_name, &sep->pdev->dev);
210 if (error) {
211 edbg("SEP Driver:cant request res fw\n");
212 return error;
213 }
214 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215
216 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
217 sep->resident_size = fw->size;
218 release_firmware(fw);
219
220 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
221 sep->resident_addr, (unsigned long long)sep->resident_bus,
222 sep->rar_addr, (unsigned long long)sep->rar_bus);
223 return 0;
224}
225
226MODULE_FIRMWARE("sep/cache.image.bin");
227MODULE_FIRMWARE("sep/resident.image.bin");
228
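Review note: the two images are laid out back to back in the RAR region, the cache at rar_addr and the resident image immediately after it, so the device-visible bus addresses are plain offsets from rar_bus. From the assignments above:

    /* layout after sep_load_firmware() */
    cache:    rar_addr              .. rar_addr + cache_size
    resident: rar_addr + cache_size .. rar_addr + cache_size + resident_size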
229/**
230 * sep_map_and_alloc_shared_area - allocate shared block
231 * @sep: security processor
232 * @size: size of shared area
233 *
234 * Allocate a shared buffer in host memory that can be used by both the
235 * kernel and also the hardware interface via DMA.
236 */
237
238static int sep_map_and_alloc_shared_area(struct sep_device *sep,
239 unsigned long size)
240{
241 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
242 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
243 &sep->shared_bus, GFP_KERNEL);
244
245 if (!sep->shared_addr) {
246 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
247 return -ENOMEM;
248 }
249 /* set the bus address of the shared area */
250 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
251 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
252 return 0;
253}
254
255/**
256 * sep_unmap_and_free_shared_area - free shared block
257 * @sep: security processor
258 *
259 * Free the shared area allocated to the security processor. The
260 * processor must have finished with this and any final posted
261 * writes cleared before we do so.
262 */
263static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
264{
265 dma_free_coherent(&sep->pdev->dev, size,
266 sep->shared_addr, sep->shared_bus);
267}
268
269/**
270 * sep_shared_virt_to_bus - convert bus/virt addresses
271 *
272 * Returns the bus address inside the shared area according
273 * to the virtual address.
274 */
275
276static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
277 void *virt_address)
278{
279 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
280 edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
281 virt_address);
282 return pa;
283}
284
285/**
286 * sep_shared_bus_to_virt - convert bus/virt addresses
287 *
288 * Returns virtual address inside the shared area according
289 * to the bus address.
290 */
291
292static void *sep_shared_bus_to_virt(struct sep_device *sep,
293 dma_addr_t bus_address)
294{
295 return sep->shared_addr + (bus_address - sep->shared_bus);
296}
297
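Review note: the two converters are inverses over the shared area; for any pointer v with shared_addr <= v < shared_addr + size, sep_shared_bus_to_virt(sep, sep_shared_virt_to_bus(sep, v)) == v, which is what lets the driver hand bus addresses to the device and map replies back to kernel pointers.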
298
299/**
300 * sep_try_open - attempt to open a SEP device
301 * @sep: device to attempt to open
302 *
303 * Atomically attempt to get ownership of a SEP device.
304 * Returns 1 if the device was opened, 0 on failure.
305 */
306
307static int sep_try_open(struct sep_device *sep)
308{
309 if (!test_and_set_bit(0, &sep->in_use))
310 return 1;
311 return 0;
312}
313
314/**
315 * sep_open - device open method
316 * @inode: inode of sep device
317 * @filp: file handle to sep device
318 *
319 * Open method for the SEP device. Called when userspace opens
320 * the SEP device node. Must also release the memory data pool
321 * allocations.
322 *
323 * Returns zero on success otherwise an error code.
324 */
325
326static int sep_open(struct inode *inode, struct file *filp)
327{
328 if (sep_dev == NULL)
329 return -ENODEV;
330
331 /* check the blocking mode */
332 if (filp->f_flags & O_NDELAY) {
333 if (sep_try_open(sep_dev) == 0)
334 return -EAGAIN;
335 } else
336 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
337 return -EINTR;
338
339 /* Bind to the device, we only have one which makes it easy */
340 filp->private_data = sep_dev;
341 /* release data pool allocations */
342 sep_dev->data_pool_bytes_allocated = 0;
343 return 0;
344}
345
346
347/**
348 * sep_release - close a SEP device
349 * @inode: inode of SEP device
350 * @filp: file handle being closed
351 *
352 * Called on the final close of a SEP device. As the open protects against
 353 * multiple simultaneous opens, this method is called when the
354 * final reference to the open handle is dropped.
355 */
356
357static int sep_release(struct inode *inode, struct file *filp)
358{
359 struct sep_device *sep = filp->private_data;
360#if 0 /*!SEP_DRIVER_POLLING_MODE */
361 /* close IMR */
362 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
363 /* release IRQ line */
364 free_irq(SEP_DIRVER_IRQ_NUM, sep);
365
366#endif
367 /* Ensure any blocked open progresses */
368 clear_bit(0, &sep->in_use);
369 wake_up(&sep_event);
370 return 0;
371}
372
373/*---------------------------------------------------------------
374 map function - this functions maps the message shared area
375-----------------------------------------------------------------*/
376static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
377{
378 dma_addr_t bus_addr;
379 struct sep_device *sep = filp->private_data;
380
381 dbg("-------->SEP Driver: mmap start\n");
382
 383 /* check that the size of the mapped range does not exceed the size of
 384 the message shared area */
385 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
386 edbg("SEP Driver mmap requested size is more than allowed\n");
387 printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
388 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
 389 printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n", vma->vm_start);
390 return -EAGAIN;
391 }
392
393 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
394
395 /* get bus address */
396 bus_addr = sep->shared_bus;
397
398 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
399
400 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
401 edbg("SEP Driver remap_page_range failed\n");
402 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
403 return -EAGAIN;
404 }
405
406 dbg("SEP Driver:<-------- mmap end\n");
407
408 return 0;
409}
410
411
412/*-----------------------------------------------
413 poll function
414*----------------------------------------------*/
415static unsigned int sep_poll(struct file *filp, poll_table * wait)
416{
417 unsigned long count;
418 unsigned int mask = 0;
419 unsigned long retval = 0; /* flow id */
420 struct sep_device *sep = filp->private_data;
421
422 dbg("---------->SEP Driver poll: start\n");
423
424
425#if SEP_DRIVER_POLLING_MODE
426
427 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
428 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
429
430 for (count = 0; count < 10 * 4; count += 4)
431 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
432 }
433
434 sep->reply_ct++;
435#else
436 /* add the event to the polling wait table */
437 poll_wait(filp, &sep_event, wait);
438
439#endif
440
441 edbg("sep->send_ct is %lu\n", sep->send_ct);
442 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
443
444 /* check if the data is ready */
445 if (sep->send_ct == sep->reply_ct) {
446 for (count = 0; count < 12 * 4; count += 4)
447 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
448
449 for (count = 0; count < 10 * 4; count += 4)
450 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
451
452 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
453 edbg("retval is %lu\n", retval);
 454 /* check if this is a sep reply or a request */
455 if (retval >> 31) {
456 edbg("SEP Driver: sep request in\n");
457 /* request */
458 mask |= POLLOUT | POLLWRNORM;
459 } else {
460 edbg("SEP Driver: sep reply in\n");
461 mask |= POLLIN | POLLRDNORM;
462 }
463 }
464 dbg("SEP Driver:<-------- poll exit\n");
465 return mask;
466}
467
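Review note: sep_poll() treats bit 31 of GPR2 as the direction flag: set means SEP is raising a request of its own (POLLOUT | POLLWRNORM), clear means a reply to the host is ready (POLLIN | POLLRDNORM), and "data is ready" is simply send_ct == reply_ct.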
468/**
469 * sep_time_address - address in SEP memory of time
470 * @sep: SEP device we want the address from
471 *
472 * Return the address of the two dwords in memory used for time
473 * setting.
474 */
475
476static u32 *sep_time_address(struct sep_device *sep)
477{
478 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
479}
480
481/**
482 * sep_set_time - set the SEP time
483 * @sep: the SEP we are setting the time for
484 *
485 * Calculates time and sets it at the predefined address.
486 * Called with the sep mutex held.
487 */
488static unsigned long sep_set_time(struct sep_device *sep)
489{
490 struct timeval time;
491 u32 *time_addr; /* address of time as seen by the kernel */
492
493
494 dbg("sep:sep_set_time start\n");
495
496 do_gettimeofday(&time);
497
498 /* set value in the SYSTEM MEMORY offset */
499 time_addr = sep_time_address(sep);
500
501 time_addr[0] = SEP_TIME_VAL_TOKEN;
502 time_addr[1] = time.tv_sec;
503
504 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
505 edbg("SEP Driver:time_addr is %p\n", time_addr);
506 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
507
508 return time.tv_sec;
509}
510
511/**
512 * sep_dump_message - dump the message that is pending
513 * @sep: sep device
514 *
515 * Dump out the message pending in the shared message area
516 */
517
518static void sep_dump_message(struct sep_device *sep)
519{
520 int count;
521 for (count = 0; count < 12 * 4; count += 4)
522 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
523}
524
525/**
526 * sep_send_command_handler - kick off a command
527 * @sep: sep being signalled
528 *
 529 * This function raises an interrupt to SEP to signal that it has a new
530 * command from the host
531 */
532
533static void sep_send_command_handler(struct sep_device *sep)
534{
535 dbg("sep:sep_send_command_handler start\n");
536
537 mutex_lock(&sep_mutex);
538 sep_set_time(sep);
539
540 /* FIXME: flush cache */
541 flush_cache_all();
542
543 sep_dump_message(sep);
544 /* update counter */
545 sep->send_ct++;
546 /* send interrupt to SEP */
547 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
548 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
549 mutex_unlock(&sep_mutex);
550 return;
551}
552
553/**
554 * sep_send_reply_command_handler - kick off a command reply
555 * @sep: sep being signalled
556 *
 557 * This function raises an interrupt to SEP to signal that it has a new
558 * command from the host
559 */
560
561static void sep_send_reply_command_handler(struct sep_device *sep)
562{
563 dbg("sep:sep_send_reply_command_handler start\n");
564
 565 /* flush cache */
566 flush_cache_all();
567
568 sep_dump_message(sep);
569
570 mutex_lock(&sep_mutex);
571 sep->send_ct++; /* update counter */
572 /* send the interrupt to SEP */
573 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
574 /* update both counters */
575 sep->send_ct++;
576 sep->reply_ct++;
577 mutex_unlock(&sep_mutex);
578 dbg("sep: sep_send_reply_command_handler end\n");
579}
580
581/*
582 This function handles the allocate data pool memory request
583 This function returns calculates the bus address of the
584 allocated memory, and the offset of this area from the mapped address.
585 Therefore, the FVOs in user space can calculate the exact virtual
586 address of this allocated memory
587*/
588static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
589 unsigned long arg)
590{
591 int error;
592 struct sep_driver_alloc_t command_args;
593
594 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
595
596 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
597 if (error) {
598 error = -EFAULT;
599 goto end_function;
600 }
601
602 /* allocate memory */
603 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
604 error = -ENOMEM;
605 goto end_function;
606 }
607
608 /* set the virtual and bus address */
609 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
610 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
611
612 /* write the memory back to the user space */
613 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
614 if (error) {
615 error = -EFAULT;
616 goto end_function;
617 }
618
619 /* set the allocation */
620 sep->data_pool_bytes_allocated += command_args.num_bytes;
621
622end_function:
623 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
624 return error;
625}
626
627/*
628 This function handles write into allocated data pool command
629*/
630static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
631{
632 int error;
633 void *virt_address;
634 unsigned long va;
635 unsigned long app_in_address;
636 unsigned long num_bytes;
637 void *data_pool_area_addr;
638
639 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
640
641 /* get the application address */
642 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
643 if (error)
644 goto end_function;
645
 646 /* get the kernel virtual address */
647 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
648 if (error)
649 goto end_function;
650 virt_address = (void *)va;
651
652 /* get the number of bytes */
653 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
654 if (error)
655 goto end_function;
656
657 /* calculate the start of the data pool */
658 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
659
660
661 /* check that the range of the virtual kernel address is correct */
662 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
663 error = -EINVAL;
664 goto end_function;
665 }
666 /* copy the application data */
667 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
668 if (error)
669 error = -EFAULT;
670end_function:
671 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
672 return error;
673}
674
675/*
676 this function handles the read from data pool command
677*/
678static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
679{
680 int error;
681 /* virtual address of dest application buffer */
682 unsigned long app_out_address;
683 /* virtual address of the data pool */
684 unsigned long va;
685 void *virt_address;
686 unsigned long num_bytes;
687 void *data_pool_area_addr;
688
689 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
690
691 /* get the application address */
692 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
693 if (error)
694 goto end_function;
695
 696 /* get the kernel virtual address */
697 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
698 if (error)
699 goto end_function;
700 virt_address = (void *)va;
701
702 /* get the number of bytes */
703 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
704 if (error)
705 goto end_function;
706
707 /* calculate the start of the data pool */
708 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
709
710 /* FIXME: These are incomplete all over the driver: what about + len
711 and when doing that also overflows */
712 /* check that the range of the virtual kernel address is correct */
713 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
714 error = -EINVAL;
715 goto end_function;
716 }
717
718 /* copy the application data */
719 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
720 if (error)
721 error = -EFAULT;
722end_function:
723 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
724 return error;
725}
726
727/*
728 This function releases all the application virtual buffer physical pages,
729 that were previously locked
730*/
731static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
732{
733 unsigned long count;
734
735 if (dirtyFlag) {
736 for (count = 0; count < num_pages; count++) {
737 /* the out array was written, therefore the data was changed */
738 if (!PageReserved(page_array_ptr[count]))
739 SetPageDirty(page_array_ptr[count]);
740 page_cache_release(page_array_ptr[count]);
741 }
742 } else {
743 /* free in pages - the data was only read, therefore no update was done
744 on those pages */
745 for (count = 0; count < num_pages; count++)
746 page_cache_release(page_array_ptr[count]);
747 }
748
749 if (page_array_ptr)
750 /* free the array */
751 kfree(page_array_ptr);
752
753 return 0;
754}
755
756/*
757 This function locks all the physical pages of the kernel virtual buffer
 758 and constructs a basic lli array, where each entry holds the physical
 759 page address and the size of application data held in that physical page
760*/
761static int sep_lock_kernel_pages(struct sep_device *sep,
762 unsigned long kernel_virt_addr,
763 unsigned long data_size,
764 unsigned long *num_pages_ptr,
765 struct sep_lli_entry_t **lli_array_ptr,
766 struct page ***page_array_ptr)
767{
768 int error = 0;
 769 /* the page of the end address of the kernel buffer */
 770 unsigned long end_page;
 771 /* the page of the start address of the kernel buffer */
 772 unsigned long start_page;
773 /* the range in pages */
774 unsigned long num_pages;
775 struct sep_lli_entry_t *lli_array;
776 /* next kernel address to map */
777 unsigned long next_kernel_address;
778 unsigned long count;
779
780 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
781
782 /* set start and end pages and num pages */
783 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
784 start_page = kernel_virt_addr >> PAGE_SHIFT;
785 num_pages = end_page - start_page + 1;
786
787 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
788 edbg("SEP Driver: data_size is %lu\n", data_size);
789 edbg("SEP Driver: start_page is %lx\n", start_page);
790 edbg("SEP Driver: end_page is %lx\n", end_page);
791 edbg("SEP Driver: num_pages is %lu\n", num_pages);
792
793 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
794 if (!lli_array) {
795 edbg("SEP Driver: kmalloc for lli_array failed\n");
796 error = -ENOMEM;
797 goto end_function;
798 }
799
 800 /* set the start address of the first page - app data may not start at
 801 the beginning of the page */
802 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
803
804 /* check that not all the data is in the first page only */
805 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
806 lli_array[0].block_size = data_size;
807 else
808 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
809
810 /* debug print */
811 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
812
813 /* advance the address to the start of the next page */
814 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
815
816 /* go from the second page to the prev before last */
817 for (count = 1; count < (num_pages - 1); count++) {
818 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819 lli_array[count].block_size = PAGE_SIZE;
820
821 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
822 next_kernel_address += PAGE_SIZE;
823 }
824
 825 /* if more than one page is locked - update the size needed for the last page */
826 if (num_pages > 1) {
827 /* update the address of the last page */
828 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
829
830 /* set the size of the last page */
831 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
832
833 if (lli_array[count].block_size == 0) {
834 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
835 dbg("data_size is %lu\n", data_size);
836 while (1);
837 }
838
839 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
840 }
841 /* set output params */
842 *lli_array_ptr = lli_array;
843 *num_pages_ptr = num_pages;
844 *page_array_ptr = 0;
845end_function:
846 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
 847 return error;
848}
849
850/*
851 This function locks all the physical pages of the application virtual buffer
 852 and constructs a basic lli array, where each entry holds the physical page
 853 address and the size of application data held in that physical page
854*/
855static int sep_lock_user_pages(struct sep_device *sep,
856 unsigned long app_virt_addr,
857 unsigned long data_size,
858 unsigned long *num_pages_ptr,
859 struct sep_lli_entry_t **lli_array_ptr,
860 struct page ***page_array_ptr)
861{
862 int error = 0;
 863 /* the page of the end address of the user space buffer */
864 unsigned long end_page;
865 /* the page of the start address of the user space buffer */
866 unsigned long start_page;
867 /* the range in pages */
868 unsigned long num_pages;
869 struct page **page_array;
870 struct sep_lli_entry_t *lli_array;
871 unsigned long count;
872 int result;
873
874 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
875
876 /* set start and end pages and num pages */
877 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
878 start_page = app_virt_addr >> PAGE_SHIFT;
879 num_pages = end_page - start_page + 1;
880
881 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
882 edbg("SEP Driver: data_size is %lu\n", data_size);
883 edbg("SEP Driver: start_page is %lu\n", start_page);
884 edbg("SEP Driver: end_page is %lu\n", end_page);
885 edbg("SEP Driver: num_pages is %lu\n", num_pages);
886
887 /* allocate array of pages structure pointers */
888 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
889 if (!page_array) {
890 edbg("SEP Driver: kmalloc for page_array failed\n");
891
892 error = -ENOMEM;
893 goto end_function;
894 }
895
896 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
897 if (!lli_array) {
898 edbg("SEP Driver: kmalloc for lli_array failed\n");
899
900 error = -ENOMEM;
901 goto end_function_with_error1;
902 }
903
904 /* convert the application virtual address into a set of physical */
905 down_read(&current->mm->mmap_sem);
906 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
907 up_read(&current->mm->mmap_sem);
908
909 /* check the number of pages locked - if not all then exit with error */
910 if (result != num_pages) {
911 dbg("SEP Driver: not all pages locked by get_user_pages\n");
912
913 error = -ENOMEM;
914 goto end_function_with_error2;
915 }
916
917 /* flush the cache */
918 for (count = 0; count < num_pages; count++)
919 flush_dcache_page(page_array[count]);
920
 921 /* set the start address of the first page - app data may not start at
 922 the beginning of the page */
923 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
924
925 /* check that not all the data is in the first page only */
926 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
927 lli_array[0].block_size = data_size;
928 else
929 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
930
931 /* debug print */
932 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
933
934 /* go from the second page to the prev before last */
935 for (count = 1; count < (num_pages - 1); count++) {
936 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
937 lli_array[count].block_size = PAGE_SIZE;
938
939 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
940 }
941
 942 /* if more than one page is locked - update the size needed for the last page */
943 if (num_pages > 1) {
944 /* update the address of the last page */
945 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
946
947 /* set the size of the last page */
948 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
949
950 if (lli_array[count].block_size == 0) {
951 dbg("app_virt_addr is %08lx\n", app_virt_addr);
952 dbg("data_size is %lu\n", data_size);
953 while (1);
954 }
955 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
956 count, lli_array[count].physical_address,
957 count, lli_array[count].block_size);
958 }
959
960 /* set output params */
961 *lli_array_ptr = lli_array;
962 *num_pages_ptr = num_pages;
963 *page_array_ptr = page_array;
964 goto end_function;
965
966end_function_with_error2:
967 /* release the cache */
968 for (count = 0; count < num_pages; count++)
969 page_cache_release(page_array[count]);
970 kfree(lli_array);
971end_function_with_error1:
972 kfree(page_array);
973end_function:
974 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
 975 return error;
976}
977
978
979/*
 980 this function calculates the size of data that can be inserted into the lli
 981 table from this array; the condition is that either the table is full
 982 (all entries are entered), or there are no more entries in the lli array
983*/
984static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
985{
986 unsigned long table_data_size = 0;
987 unsigned long counter;
988
 989 /* calculate the data in the out lli table till we fill the whole
990 table or till the data has ended */
991 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
992 table_data_size += lli_in_array_ptr[counter].block_size;
993 return table_data_size;
994}
995
996/*
 997 this function builds one lli table from the lli_array according to
998 the given size of data
999*/
1000static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1001{
1002 unsigned long curr_table_data_size;
1003 /* counter of lli array entry */
1004 unsigned long array_counter;
1005
1006 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1007
 1008 /* init current table data size and lli array entry counter */
1009 curr_table_data_size = 0;
1010 array_counter = 0;
1011 *num_table_entries_ptr = 1;
1012
1013 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1014
1015 /* fill the table till table size reaches the needed amount */
1016 while (curr_table_data_size < table_data_size) {
1017 /* update the number of entries in table */
1018 (*num_table_entries_ptr)++;
1019
1020 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1021 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1022 curr_table_data_size += lli_table_ptr->block_size;
1023
1024 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1025 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1026 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1027
1028 /* check for overflow of the table data */
1029 if (curr_table_data_size > table_data_size) {
1030 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1031
1032 /* update the size of block in the table */
1033 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1034
1035 /* update the physical address in the lli array */
1036 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1037
1038 /* update the block size left in the lli array */
1039 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1040 } else
1041 /* advance to the next entry in the lli_array */
1042 array_counter++;
1043
1044 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1045 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1046
1047 /* move to the next entry in table */
1048 lli_table_ptr++;
1049 }
1050
1051 /* set the info entry to default */
1052 lli_table_ptr->physical_address = 0xffffffff;
1053 lli_table_ptr->block_size = 0;
1054
1055 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1056 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1057 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1058
1059 /* set the output parameter */
1060 *num_processed_entries_ptr += array_counter;
1061
1062 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1063 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1064 return;
1065}
1066
1067/*
 1068 this function goes over the list of created tables and
1069 prints all the data
1070*/
1071static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1072{
1073 unsigned long table_count;
1074 unsigned long entries_count;
1075
1076 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1077
1078 table_count = 1;
1079 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1080 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1081 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1082
1083 /* print entries of the table (without info entry) */
1084 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1085 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1086 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1087 }
1088
1089 /* point to the info entry */
1090 lli_table_ptr--;
1091
1092 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1093 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1094
1095
1096 table_data_size = lli_table_ptr->block_size & 0xffffff;
1097 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1098 lli_table_ptr = (struct sep_lli_entry_t *)
1099 (lli_table_ptr->physical_address);
1100
1101 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1102
1103 if ((unsigned long) lli_table_ptr != 0xffffffff)
1104 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1105
1106 table_count++;
1107 }
1108 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1109}
1110
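Review note: the walker above documents the info-entry encoding each table ends with: the low 24 bits of block_size carry the data size of the next table, the high byte carries its entry count, and physical_address links to it (0xffffffff terminates the chain). Decoded:

    next_table_bus   = info->physical_address;           /* 0xffffffff ends it */
    next_data_size   = info->block_size & 0x00ffffff;    /* low 24 bits */
    next_num_entries = (info->block_size >> 24) & 0xff;  /* high byte */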
1111
1112/*
 1113 This function prepares only the input DMA table for synchronous symmetric
1114 operations (HASH)
1115*/
1116static int sep_prepare_input_dma_table(struct sep_device *sep,
1117 unsigned long app_virt_addr,
1118 unsigned long data_size,
1119 unsigned long block_size,
1120 unsigned long *lli_table_ptr,
1121 unsigned long *num_entries_ptr,
1122 unsigned long *table_data_size_ptr,
1123 bool isKernelVirtualAddress)
1124{
1125 /* pointer to the info entry of the table - the last entry */
1126 struct sep_lli_entry_t *info_entry_ptr;
 1127 /* array of pointers to pages */
1128 struct sep_lli_entry_t *lli_array_ptr;
1129 /* points to the first entry to be processed in the lli_in_array */
1130 unsigned long current_entry;
1131 /* num entries in the virtual buffer */
1132 unsigned long sep_lli_entries;
1133 /* lli table pointer */
1134 struct sep_lli_entry_t *in_lli_table_ptr;
1135 /* the total data in one table */
1136 unsigned long table_data_size;
1137 /* number of entries in lli table */
1138 unsigned long num_entries_in_table;
1139 /* next table address */
1140 void *lli_table_alloc_addr;
1141 unsigned long result;
1142
1143 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1144
1145 edbg("SEP Driver:data_size is %lu\n", data_size);
1146 edbg("SEP Driver:block_size is %lu\n", block_size);
1147
1148 /* initialize the pages pointers */
1149 sep->in_page_array = 0;
1150 sep->in_num_pages = 0;
1151
1152 if (data_size == 0) {
1153 /* special case - created 2 entries table with zero data */
1154 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1155 /* FIXME: Should the entry below not be for _bus */
1156 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1157 in_lli_table_ptr->block_size = 0;
1158
1159 in_lli_table_ptr++;
1160 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1161 in_lli_table_ptr->block_size = 0;
1162
1163 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1164 *num_entries_ptr = 2;
1165 *table_data_size_ptr = 0;
1166
1167 goto end_function;
1168 }
1169
1170 /* check if the pages are in Kernel Virtual Address layout */
1171 if (isKernelVirtualAddress == true)
1172 /* lock the pages of the kernel buffer and translate them to pages */
1173 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1174 else
1175 /* lock the pages of the user buffer and translate them to pages */
1176 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1177
1178 if (result)
1179 return result;
1180
1181 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1182
1183 current_entry = 0;
1184 info_entry_ptr = 0;
1185 sep_lli_entries = sep->in_num_pages;
1186
 1187 /* initialize to point after the message area */
1188 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1189
 1190 /* loop till all the entries in the in array are processed */
1191 while (current_entry < sep_lli_entries) {
1192 /* set the new input and output tables */
1193 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1194
1195 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1196
1197 /* calculate the maximum size of data for input table */
1198 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1199
 1200 /* now calculate the table size so that it is a multiple of the block size */
1201 table_data_size = (table_data_size / block_size) * block_size;
1202
1203 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1204
1205 /* construct input lli table */
1206 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1207
1208 if (info_entry_ptr == 0) {
1209 /* set the output parameters to physical addresses */
1210 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1211 *num_entries_ptr = num_entries_in_table;
1212 *table_data_size_ptr = table_data_size;
1213
1214 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1215 } else {
1216 /* update the info entry of the previous in table */
1217 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1218 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1219 }
1220
1221 /* save the pointer to the info entry of the current tables */
1222 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1223 }
1224
1225 /* print input tables */
1226 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1227 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1228
1229 /* free the array of lli entries */
1230 kfree(lli_array_ptr);
1231end_function:
1232 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1233 return 0;
1234
1235}
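/*
 * Editorial sketch, not part of the original driver: the last entry of
 * every LLI table built above is an "info" entry whose block_size word
 * packs the entry count into the top bits and the table data size into
 * the rest, as in ((num_entries << 24) | table_data_size).  A set of
 * hypothetical helpers makes the encoding explicit; the shift and masks
 * match SEP_NUM_ENTRIES_OFFSET_IN_BITS, SEP_NUM_ENTRIES_MASK and
 * SEP_TABLE_DATA_SIZE_MASK as used elsewhere in this file.
 */
static inline unsigned long sep_pack_info_entry(unsigned long num_entries,
						unsigned long data_size)
{
	return (num_entries << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | data_size;
}

static inline unsigned long sep_info_num_entries(unsigned long block_size)
{
	return (block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) &
	       SEP_NUM_ENTRIES_MASK;
}

static inline unsigned long sep_info_data_size(unsigned long block_size)
{
	return block_size & SEP_TABLE_DATA_SIZE_MASK;
}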
1236
1237/*
1238 This function creates the input and output dma tables for
1239 symmetric operations (AES/DES) according to the block size from LLI arrays
1240*/
1241static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1242 struct sep_lli_entry_t *lli_in_array,
1243 unsigned long sep_in_lli_entries,
1244 struct sep_lli_entry_t *lli_out_array,
1245 unsigned long sep_out_lli_entries,
1246 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1247{
1248 /* points to the area where next lli table can be allocated: keep void *
1249 as there is pointer scaling to fix otherwise */
1250 void *lli_table_alloc_addr;
1251 /* input lli table */
1252 struct sep_lli_entry_t *in_lli_table_ptr;
1253 /* output lli table */
1254 struct sep_lli_entry_t *out_lli_table_ptr;
1255 /* pointer to the info entry of the table - the last entry */
1256 struct sep_lli_entry_t *info_in_entry_ptr;
1257 /* pointer to the info entry of the table - the last entry */
1258 struct sep_lli_entry_t *info_out_entry_ptr;
1259 /* points to the first entry to be processed in the lli_in_array */
1260 unsigned long current_in_entry;
1261 /* points to the first entry to be processed in the lli_out_array */
1262 unsigned long current_out_entry;
1263 /* max size of the input table */
1264 unsigned long in_table_data_size;
1265 /* max size of the output table */
1266 unsigned long out_table_data_size;
1267 /* flag that signifies whether this is the first table built from the arrays */
1268 unsigned long first_table_flag;
1269 /* the data size that should be in table */
1270 unsigned long table_data_size;
1271 /* number of entries in the input table */
1272 unsigned long num_entries_in_table;
1273 /* number of entries in the output table */
1274 unsigned long num_entries_out_table;
1275
1276 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1277
1278 /* initialize to point after the message area */
1279 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1280
1281 current_in_entry = 0;
1282 current_out_entry = 0;
1283 first_table_flag = 1;
1284 info_in_entry_ptr = 0;
1285 info_out_entry_ptr = 0;
1286
1287 /* loop until all the entries in the input array are processed */
1288 while (current_in_entry < sep_in_lli_entries) {
1289 /* set the new input and output tables */
1290 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1291
1292 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1293
1294 /* set the first output tables */
1295 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1296
1297 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1298
1299 /* calculate the maximum size of data for input table */
1300 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1301
1302 /* calculate the maximum size of data for output table */
1303 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1304
1305 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1306 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1307
1308 /* take the smaller of the two data sizes */
1309 table_data_size = in_table_data_size;
1310 if (table_data_size > out_table_data_size)
1311 table_data_size = out_table_data_size;
1312
1313 /* now round the table size down to a multiple of the block size */
1314 table_data_size = (table_data_size / block_size) * block_size;
1315
1316 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1317
1318 /* construct input lli table */
1319 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1320
1321 /* construct output lli table */
1322 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1323
1324 /* if info entry is null - this is the first table built */
1325 if (info_in_entry_ptr == 0) {
1326 /* set the output parameters to physical addresses */
1327 *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1328 *in_num_entries_ptr = num_entries_in_table;
1329 *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1330 *out_num_entries_ptr = num_entries_out_table;
1331 *table_data_size_ptr = table_data_size;
1332
1333 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1334 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1335 } else {
1336 /* update the info entry of the previous in table */
1337 info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1338 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1339
1340 /* update the info entry of the previous out table */
1341 info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1342 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1343 }
1344
1345 /* save the pointer to the info entry of the current tables */
1346 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1347 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1348
1349 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1350 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1351 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1352 }
1353
1354 /* print input tables */
1355 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1356 sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1357 /* print output tables */
1358 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1359 sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1360 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1361 return 0;
1362}
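/*
 * Editorial sketch of how the tables built above chain together: each
 * table's info (last) entry holds the bus address of the next table and
 * its packed size, and - by the convention seen in the zero-data case
 * above, an assumption for the general case - the final info entry
 * carries 0xffffffff.  Walking a chain back through virtual addresses:
 */
static void sep_walk_lli_chain(struct sep_device *sep,
			       unsigned long table_bus,
			       unsigned long num_entries)
{
	struct sep_lli_entry_t *table, *info;

	while (table_bus != 0xffffffff) {
		table = (struct sep_lli_entry_t *)
			sep_shared_bus_to_virt(sep, table_bus);
		/* the info entry is the last entry of the table */
		info = table + num_entries - 1;
		table_bus = info->physical_address;
		num_entries = (info->block_size >>
			       SEP_NUM_ENTRIES_OFFSET_IN_BITS) &
			      SEP_NUM_ENTRIES_MASK;
	}
}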
1363
1364
1365/*
1366 This function builds input and output DMA tables for synchronous
1367 symmetric operations (AES, DES). It also ensures that each table's
1368 data size is a multiple of the block size
1369*/
1370static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1371 unsigned long app_virt_in_addr,
1372 unsigned long app_virt_out_addr,
1373 unsigned long data_size,
1374 unsigned long block_size,
1375 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1376{
1377 /* array of pointers of page */
1378 struct sep_lli_entry_t *lli_in_array;
1379 /* array of pointers of page */
1380 struct sep_lli_entry_t *lli_out_array;
1381 int result = 0;
1382
1383 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1384
1385 /* initialize the pages pointers */
1386 sep->in_page_array = 0;
1387 sep->out_page_array = 0;
1388
1389 /* check if the pages are in Kernel Virtual Address layout */
1390 if (isKernelVirtualAddress == true) {
1391 /* lock the pages of the kernel buffer and translate them to pages */
1392 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1393 if (result) {
1394 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1395 goto end_function;
1396 }
1397 } else {
1398 /* lock the pages of the user buffer and translate them to pages */
1399 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1400 if (result) {
1401 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1402 goto end_function;
1403 }
1404 }
1405
1406 if (isKernelVirtualAddress == true) {
1407 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1408 if (result) {
1409 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1410 goto end_function_with_error1;
1411 }
1412 } else {
1413 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1414 if (result) {
1415 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1416 goto end_function_with_error1;
1417 }
1418 }
1419 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1420 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1421 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1422
1423
1424 /* call the function that creates the tables from the lli arrays */
1425 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1426 if (result) {
1427 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1428 goto end_function_with_error2;
1429 }
1430
1431 /* fall through - free the lli entry arrays */
1432 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1433 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1434 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1435end_function_with_error2:
1436 kfree(lli_out_array);
1437end_function_with_error1:
1438 kfree(lli_in_array);
1439end_function:
1440 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1441 return result;
1442
1443}
1444
1445/*
1446 this function handles the request for creation of the DMA tables
1447 for the synchronous symmetric operations (AES, DES)
1448*/
1449static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1450 unsigned long arg)
1451{
1452 int error;
1453 /* command arguments */
1454 struct sep_driver_build_sync_table_t command_args;
1455
1456 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1457
1458 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1459 if (error) {
1460 error = -EFAULT;
1461 goto end_function;
1462 }
1463
1464 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1465 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1466 edbg("data_size is %lu\n", command_args.data_in_size);
1467 edbg("block_size is %lu\n", command_args.block_size);
1468
1469 /* check if we need to build only input table or input/output */
1470 if (command_args.app_out_address)
1471 /* prepare input and output tables */
1472 error = sep_prepare_input_output_dma_table(sep,
1473 command_args.app_in_address,
1474 command_args.app_out_address,
1475 command_args.data_in_size,
1476 command_args.block_size,
1477 &command_args.in_table_address,
1478 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1479 else
1480 /* prepare input tables */
1481 error = sep_prepare_input_dma_table(sep,
1482 command_args.app_in_address,
1483 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1484
1485 if (error)
1486 goto end_function;
1487 /* copy to user */
1488 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1489 error = -EFAULT;
1490end_function:
1491 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1492 return error;
1493}
1494
1495/*
1496 this function handles the request for freeing the dma tables of synchronous actions
1497*/
1498static int sep_free_dma_table_data_handler(struct sep_device *sep)
1499{
1500 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1501
1502 /* free input pages array */
1503 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1504
1505 /* free output pages array if needed */
1506 if (sep->out_page_array)
1507 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1508
1509 /* reset all the values */
1510 sep->in_page_array = 0;
1511 sep->out_page_array = 0;
1512 sep->in_num_pages = 0;
1513 sep->out_num_pages = 0;
1514 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1515 return 0;
1516}
1517
1518/*
1519 this function finds space for a new flow dma table
1520*/
1521static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1522 unsigned long **table_address_ptr)
1523{
1524 int error = 0;
1525 /* pointer to the id field of the flow dma table */
1526 unsigned long *start_table_ptr;
1527 /* Do not make start_addr unsigned long * unless fixing the offset
1528 computations ! */
1529 void *flow_dma_area_start_addr;
1530 unsigned long *flow_dma_area_end_addr;
1531 /* maximum table size in words */
1532 unsigned long table_size_in_words;
1533
1534 /* find the start address of the flow DMA table area */
1535 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1536
1537 /* set end address of the flow table area */
1538 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1539
1540 /* set table size in words */
1541 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1542
1543 /* set the pointer to the start address of DMA area */
1544 start_table_ptr = flow_dma_area_start_addr;
1545
1546 /* find the space for the next table */
1547 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1548 start_table_ptr += table_size_in_words;
1549
1550 /* check if we reached the end of the flow tables area */
1551 if (start_table_ptr >= flow_dma_area_end_addr)
1552 error = -1;
1553 else
1554 *table_address_ptr = start_table_ptr;
1555
1556 return error;
1557}
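/*
 * Editorial note: each flow table slot spans table_size_in_words words -
 * SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE LLI entries plus the two
 * header words that sep_prepare_one_flow_dma_table() writes in front of
 * the entries (the entry count and the page-array pointer).  As a worked
 * example with assumed values - an 8-byte sep_lli_entry_t, 4-byte longs
 * and 25 entries per table - a slot occupies 25 * (8 / 4) + 2 = 52 words.
 */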
1558
1559/*
1560 This function creates one DMA table for flow and returns its data,
1561 and pointer to its info entry
1562*/
1563static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1564 unsigned long virt_buff_addr,
1565 unsigned long virt_buff_size,
1566 struct sep_lli_entry_t *table_data,
1567 struct sep_lli_entry_t **info_entry_ptr,
1568 struct sep_flow_context_t *flow_data_ptr,
1569 bool isKernelVirtualAddress)
1570{
1571 int error;
1572 /* the range in pages */
1573 unsigned long lli_array_size;
1574 struct sep_lli_entry_t *lli_array;
1575 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1576 unsigned long *start_dma_table_ptr;
1577 /* total table data counter */
1578 unsigned long dma_table_data_count;
1579 /* pointer that will keep the pointer to the pages of the virtual buffer */
1580 struct page **page_array_ptr;
1581 unsigned long entry_count;
1582
1583 /* find the space for the new table */
1584 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1585 if (error)
1586 goto end_function;
1587
1588 /* check if the pages are in Kernel Virtual Address layout */
1589 if (isKernelVirtualAddress == true)
1590 /* lock kernel buffer in the memory */
1591 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1592 else
1593 /* lock user buffer in the memory */
1594 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1595
1596 if (error)
1597 goto end_function;
1598
1599 /* store the number of entries at the beginning of the table - this
1600 table is now considered taken */
1601 *start_dma_table_ptr = lli_array_size;
1602
1603 /* point to the place of the pages pointers of the table */
1604 start_dma_table_ptr++;
1605
1606 /* set the pages pointer */
1607 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1608
1609 /* set the pointer to the first entry */
1610 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1611
1612 /* now create the entries for table */
1613 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1614 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1615
1616 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1617
1618 /* set the total data of a table */
1619 dma_table_data_count += lli_array[entry_count].block_size;
1620
1621 flow_dma_table_entry_ptr++;
1622 }
1623
1624 /* set the physical address */
1625 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1626
1627 /* set the num_entries and total data size */
1628 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1629
1630 /* set the info entry */
1631 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1632 flow_dma_table_entry_ptr->block_size = 0;
1633
1634 /* set the pointer to info entry */
1635 *info_entry_ptr = flow_dma_table_entry_ptr;
1636
1637 /* the array of the lli entries */
1638 kfree(lli_array);
1639end_function:
1640 return error;
1641}
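/*
 * Editorial note on the slot layout written by the function above,
 * derived from the code rather than from original documentation:
 *
 *   word 0         number of LLI entries (also marks the slot as taken)
 *   word 1         pointer to the struct page array of the buffer
 *   words 2..N+1   the LLI entries themselves
 *   last entry     info entry (physical_address 0xffffffff, size 0)
 *
 * sep_deallocated_flow_tables() below depends on exactly this layout
 * when it reads *(table_ptr - 2) and *(table_ptr - 1).
 */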
1642
1643
1644
1645/*
1646 This function creates a list of tables for flow and returns the data for
1647 the first and last tables of the list
1648*/
1649static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1650 unsigned long num_virtual_buffers,
1651 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1652{
1653 int error;
1654 unsigned long virt_buff_addr;
1655 unsigned long virt_buff_size;
1656 struct sep_lli_entry_t table_data;
1657 struct sep_lli_entry_t *info_entry_ptr;
1658 struct sep_lli_entry_t *prev_info_entry_ptr;
1659 unsigned long i;
1660
1661 /* init vars */
1662 error = 0;
1663 prev_info_entry_ptr = 0;
1664
1665 /* init the first table to default */
1666 table_data.physical_address = 0xffffffff;
1667 first_table_data_ptr->physical_address = 0xffffffff;
1668 table_data.block_size = 0;
1669
1670 for (i = 0; i < num_virtual_buffers; i++) {
1671 /* get the virtual buffer address */
1672 error = get_user(virt_buff_addr, &first_buff_addr);
1673 if (error)
1674 goto end_function;
1675
1676 /* get the virtual buffer size */
1677 first_buff_addr++;
1678 error = get_user(virt_buff_size, &first_buff_addr);
1679 if (error)
1680 goto end_function;
1681
1682 /* advance the address to point to the next pair of address|size */
1683 first_buff_addr++;
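		/* FIXME (editorial): get_user() above is handed &first_buff_addr,
		   the address of a local kernel variable, where a user-space
		   pointer is expected, and the ++ advances the value by one
		   rather than by one word - the address/size pairs are unlikely
		   to be read as intended */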
1684
1685 /* now prepare the one flow LLI table from the data */
1686 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1687 if (error)
1688 goto end_function;
1689
1690 if (i == 0) {
1691 /* if this is the first table - save it to return to the user
1692 application */
1693 *first_table_data_ptr = table_data;
1694
1695 /* set the pointer to info entry */
1696 prev_info_entry_ptr = info_entry_ptr;
1697 } else {
1698 /* not first table - the previous table info entry should
1699 be updated */
1700 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1701
1702 /* set the pointer to info entry */
1703 prev_info_entry_ptr = info_entry_ptr;
1704 }
1705 }
1706
1707 /* set the last table data */
1708 *last_table_data_ptr = table_data;
1709end_function:
1710 return error;
1711}
1712
1713/*
1714 this function goes over all the flow tables connected to the given
1715 table and deallocates them
1716*/
1717static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1718{
1719 /* id pointer */
1720 unsigned long *table_ptr;
1721 /* number of entries in the table */
1722 unsigned long num_entries;
1723 unsigned long num_pages;
1724 struct page **pages_ptr;
1725 /* pointer to the info entry of the table */
1726 struct sep_lli_entry_t *info_entry_ptr;
1727
1728 /* set the pointer to the first table */
1729 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1730
1731 /* set the num of entries */
1732 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1733 & SEP_NUM_ENTRIES_MASK;
1734
1735 /* go over all the connected tables */
1736 while (*table_ptr != 0xffffffff) {
1737 /* get number of pages */
1738 num_pages = *(table_ptr - 2);
1739
1740 /* get the pointer to the pages */
1741 pages_ptr = (struct page **) (*(table_ptr - 1));
1742
1743 /* free the pages */
1744 sep_free_dma_pages(pages_ptr, num_pages, 1);
1745
1746 /* go to the info entry */
1747 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1748
1749 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1750 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1751 }
1752
1753 return;
1754}
1755
1756/**
1757 * sep_find_flow_context - find a flow
1758 * @sep: the SEP we are working with
1759 * @flow_id: flow identifier
1760 *
1761 * Returns a pointer to the matching flow, or NULL if the flow does not
1762 * exist.
1763 */
1764
1765static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1766 unsigned long flow_id)
1767{
1768 int count;
1769 /*
1770 * always search for the flow with the default id first - once we
1771 * have started working on a flow there can never be two flows
1772 * carrying the default flag
1773 */
1774 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1775 if (sep->flows[count].flow_id == flow_id)
1776 return &sep->flows[count];
1777 }
1778 return NULL;
1779}
1780
1781
1782/*
1783 this function handles the request to create the DMA tables for flow
1784*/
1785static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1786 unsigned long arg)
1787{
1788 int error = -ENOENT;
1789 struct sep_driver_build_flow_table_t command_args;
1790 /* first table - output */
1791 struct sep_lli_entry_t first_table_data;
1792 /* dma table data */
1793 struct sep_lli_entry_t last_table_data;
1794 /* pointer to the info entry of the previous DMA table */
1795 struct sep_lli_entry_t *prev_info_entry_ptr;
1796 /* pointer to the flow data structure */
1797 struct sep_flow_context_t *flow_context_ptr;
1798
1799 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1800
1801 /* init variables */
1802 prev_info_entry_ptr = 0;
1803 first_table_data.physical_address = 0xffffffff;
1804
1805 /* find the free structure for flow data */
1806 error = -EINVAL;
1807 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1808 if (flow_context_ptr == NULL)
1809 goto end_function;
1810
1811 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1812 if (error) {
1813 error = -EFAULT;
1814 goto end_function;
1815 }
1816
1817 /* create flow tables */
1818 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1819 if (error)
1820 goto end_function_with_error;
1821
1822 /* check if flow is static */
1823 if (!command_args.flow_type)
1824 /* point the info entry of the last to the info entry of the first */
1825 last_table_data = first_table_data;
1826
1827 /* set output params */
1828 command_args.first_table_addr = first_table_data.physical_address;
1829 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1830 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1831
1832 /* send the parameters to user application */
1833 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1834 if (error) {
1835 error = -EFAULT;
1836 goto end_function_with_error;
1837 }
1838
1839 /* the whole flow was created - update the flow entry with a temporary id */
1840 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1841
1842 /* set the processing tables data in the context */
1843 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1844 flow_context_ptr->input_tables_in_process = first_table_data;
1845 else
1846 flow_context_ptr->output_tables_in_process = first_table_data;
1847
1848 goto end_function;
1849
1850end_function_with_error:
1851 /* free the allocated tables */
1852 sep_deallocated_flow_tables(&first_table_data);
1853end_function:
1854 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1855 return error;
1856}
1857
1858/*
1859 this function handles adding tables to a flow
1860*/
1861static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1862{
1863 int error;
1864 unsigned long num_entries;
1865 struct sep_driver_add_flow_table_t command_args;
1866 struct sep_flow_context_t *flow_context_ptr;
1867 /* first dma table data */
1868 struct sep_lli_entry_t first_table_data;
1869 /* last dma table data */
1870 struct sep_lli_entry_t last_table_data;
1871 /* pointer to the info entry of the current DMA table */
1872 struct sep_lli_entry_t *info_entry_ptr;
1873
1874 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1875
1876 /* get input parameters */
1877 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1878 if (error) {
1879 error = -EFAULT;
1880 goto end_function;
1881 }
1882
1883 /* find the flow structure for the flow id */
1884 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1885 if (flow_context_ptr == NULL)
1886 goto end_function;
1887
1888 /* prepare the flow dma tables */
1889 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1890 if (error)
1891 goto end_function_with_error;
1892
1893 /* now check if there is already an existing add table for this flow */
1894 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1895 /* this buffer was for input buffers */
1896 if (flow_context_ptr->input_tables_flag) {
1897 /* add table already exists - add the new tables to the end
1898 of the previous */
1899 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1900
1901 info_entry_ptr = (struct sep_lli_entry_t *)
1902 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1903
1904 /* connect to list of tables */
1905 *info_entry_ptr = first_table_data;
1906
1907 /* set the first table data */
1908 first_table_data = flow_context_ptr->first_input_table;
1909 } else {
1910 /* set the input flag */
1911 flow_context_ptr->input_tables_flag = 1;
1912
1913 /* set the first table data */
1914 flow_context_ptr->first_input_table = first_table_data;
1915 }
1916 /* set the last table data */
1917 flow_context_ptr->last_input_table = last_table_data;
1918 } else { /* these are output tables */
1919
1920 /* this buffer was for output buffers */
1921 if (flow_context_ptr->output_tables_flag) {
1922 /* add table already exists - add the new tables to
1923 the end of the previous */
1924 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1925
1926 info_entry_ptr = (struct sep_lli_entry_t *)
1927 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1928
1929 /* connect to list of tables */
1930 *info_entry_ptr = first_table_data;
1931
1932 /* set the first table data */
1933 first_table_data = flow_context_ptr->first_output_table;
1934 } else {
1935 /* set the output flag */
1936 flow_context_ptr->output_tables_flag = 1;
1937
1938 /* set the first table data */
1939 flow_context_ptr->first_output_table = first_table_data;
1940 }
1941 /* set the last table data */
1942 flow_context_ptr->last_output_table = last_table_data;
1943 }
1944
1945 /* set output params */
1946 command_args.first_table_addr = first_table_data.physical_address;
1947 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1948 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1949
1950 /* send the parameters to user application */
1951 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1952 if (error)
1953 error = -EFAULT;
1954end_function_with_error:
1955 /* free the allocated tables */
1956 sep_deallocated_flow_tables(&first_table_data);
1957end_function:
1958 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1959 return error;
1960}
1961
1962/*
1963 this function adds the flow add message to the specified flow
1964*/
1965static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1966{
1967 int error;
1968 struct sep_driver_add_message_t command_args;
1969 struct sep_flow_context_t *flow_context_ptr;
1970
1971 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1972
1973 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1974 if (error) {
1975 error = -EFAULT;
1976 goto end_function;
1977 }
1978
1979 /* check input */
1980 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1981 error = -ENOMEM;
1982 goto end_function;
1983 }
1984
1985 /* find the flow context */
1986 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1987 if (flow_context_ptr == NULL)
1988 goto end_function;
1989
1990 /* copy the message into context */
1991 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1992 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1993 if (error)
1994 error = -EFAULT;
1995end_function:
1996 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1997 return error;
1998}
1999
2000
2001/*
2002 this function returns the bus and virtual addresses of the static pool
2003*/
2004static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
2005{
2006 int error;
2007 struct sep_driver_static_pool_addr_t command_args;
2008
2009 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2010
2011 /* prepare the output parameters in the struct */
2012 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2013 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2014
2015 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2016
2017 /* send the parameters to user application */
2018 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2019 if (error)
2020 error = -EFAULT;
2021 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2022 return error;
2023}
2024
2025/*
2026 this function gets the offset of the physical address from the start
2027 of the mapped area
2028*/
2029static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2030{
2031 int error;
2032 struct sep_driver_get_mapped_offset_t command_args;
2033
2034 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2035
2036 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2037 if (error) {
2038 error = -EFAULT;
2039 goto end_function;
2040 }
2041
2042 if (command_args.physical_address < sep->shared_bus) {
2043 error = -EINVAL;
2044 goto end_function;
2045 }
2046
2047 /* prepare the output parameters in the struct */
2048 command_args.offset = command_args.physical_address - sep->shared_bus;
2049
2050 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2051
2052 /* send the parameters to user application */
2053 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2054 if (error)
2055 error = -EFAULT;
2056end_function:
2057 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2058 return error;
2059}
2060
2061
2062/*
2063 this function handles the SEP start command: poll for a message from SEP and, on fatal error, read the error status from GPR0
2064*/
2065static int sep_start_handler(struct sep_device *sep)
2066{
2067 unsigned long reg_val;
2068 unsigned long error = 0;
2069
2070 dbg("SEP Driver:--------> sep_start_handler start\n");
2071
2072 /* poll for a message from SEP */
2073 do
2074 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2075 while (!reg_val);
2076
2077 /* check the value */
2078 if (reg_val == 0x1)
2079 /* fatal error - read error status from GPR0 */
2080 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2081 dbg("SEP Driver:<-------- sep_start_handler end\n");
2082 return error;
2083}
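/*
 * Editorial sketch: the polling loops in this file spin on GPR3 with no
 * pause and no timeout.  A bounded variant - an assumption, not the
 * driver's actual behaviour - could look like this:
 */
static unsigned long sep_poll_gpr3(struct sep_device *sep,
				   unsigned long max_spins)
{
	unsigned long reg_val = 0;

	while (max_spins-- && !reg_val) {
		reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
		cpu_relax();	/* rest between register reads */
	}
	return reg_val;		/* 0 means the wait timed out */
}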
2084
2085/*
2086 this function handles the request for SEP initialization
2087*/
2088static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2089{
2090 unsigned long message_word;
2091 unsigned long *message_ptr;
2092 struct sep_driver_init_t command_args;
2093 unsigned long counter;
2094 unsigned long error;
2095 unsigned long reg_val;
2096
2097 dbg("SEP Driver:--------> sep_init_handler start\n");
2098 error = 0;
2099
2100 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2101 if (error) {
2102 error = -EFAULT;
2103 goto end_function;
2104 }
2105 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
2106
2107 /* PATCH - configure the DMA to single-burst instead of multi-burst */
2108 /*sep_configure_dma_burst(); */
2109
2110 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2111
2112 message_ptr = (unsigned long *) command_args.message_addr;
2113
2114 /* set the base address of the SRAM */
2115 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2116
2117 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2118 get_user(message_word, message_ptr);
2119 /* write data to SRAM */
2120 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2121 edbg("SEP Driver:message_word is %lu\n", message_word);
2122 /* wait for write complete */
2123 sep_wait_sram_write(sep);
2124 }
2125 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2126 /* signal SEP */
2127 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2128
2129 do
2130 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2131 while (!(reg_val & 0xFFFFFFFD));
2132
2133 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2134
2135 /* check the value */
2136 if (reg_val == 0x1) {
2137 edbg("SEP Driver:init failed\n");
2138
2139 error = sep_read_reg(sep, 0x8060);
2140 edbg("SEP Driver:sw monitor is %lu\n", error);
2141
2142 /* fatal error - read error status from GPR0 */
2143 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2144 edbg("SEP Driver:error is %lu\n", error);
2145 }
2146end_function:
2147 dbg("SEP Driver:<-------- sep_init_handler end\n");
2148 return error;
2149
2150}
2151
2152/*
2153 this function handles the request for cache and resident reallocation
2154*/
2155static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2156 unsigned long arg)
2157{
2158 struct sep_driver_realloc_cache_resident_t command_args;
2159 int error;
2160
2161 /* copy cache and resident to their intended locations */
2162 error = sep_load_firmware(sep);
2163 if (error)
2164 return error;
2165
2166 command_args.new_base_addr = sep->shared_bus;
2167
2168 /* find the new base address according to the lowest address between
2169 cache, resident and shared area */
2170 if (sep->resident_bus < command_args.new_base_addr)
2171 command_args.new_base_addr = sep->resident_bus;
2172 if (sep->rar_bus < command_args.new_base_addr)
2173 command_args.new_base_addr = sep->rar_bus;
2174
2175 /* set the return parameters */
2176 command_args.new_cache_addr = sep->rar_bus;
2177 command_args.new_resident_addr = sep->resident_bus;
2178
2179 /* set the new shared area */
2180 command_args.new_shared_area_addr = sep->shared_bus;
2181
2182 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2183 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2184 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2185 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2186
2187 /* return to user */
2188 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2189 return -EFAULT;
2190 return 0;
2191}
2192
2193/**
2194 * sep_get_time_handler - time request from user space
2195 * @sep: sep we are to set the time for
2196 * @arg: pointer to user space arg buffer
2197 *
2198 * This function reports back the time and the address in the SEP
2199 * shared buffer at which it has been placed. (Do we really need this!!!)
2200 */
2201
2202static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2203{
2204 struct sep_driver_get_time_t command_args;
2205
2206 mutex_lock(&sep_mutex);
2207 command_args.time_value = sep_set_time(sep);
2208 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2209 mutex_unlock(&sep_mutex);
2210 if (copy_to_user((void __user *)arg,
2211 &command_args, sizeof(struct sep_driver_get_time_t)))
2212 return -EFAULT;
2213 return 0;
2214
2215}
2216
2217/*
2218 This API handles the end transaction request
2219*/
2220static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2221{
2222 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2223
2224#if 0 /*!SEP_DRIVER_POLLING_MODE */
2225 /* close IMR */
2226 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2227
2228 /* release IRQ line */
2229 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2230
2231 /* unlock the sep mutex */
2232 mutex_unlock(&sep_mutex);
2233#endif
2234
2235 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2236
2237 return 0;
2238}
2239
2240
2241/**
2242 * sep_set_flow_id_handler - handle flow setting
2243 * @sep: the SEP we are configuring
2244 * @flow_id: the flow we are setting
2245 *
2246 * This function handles the set flow id command
2247 */
2248static int sep_set_flow_id_handler(struct sep_device *sep,
2249 unsigned long flow_id)
2250{
2251 int error = 0;
2252 struct sep_flow_context_t *flow_data_ptr;
2253
2254 /* find the flow data structure that was just used for creating new flow
2255 - its id should be default */
2256
2257 mutex_lock(&sep_mutex);
2258 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2259 if (flow_data_ptr)
2260 flow_data_ptr->flow_id = flow_id; /* set flow id */
2261 else
2262 error = -EINVAL;
2263 mutex_unlock(&sep_mutex);
2264 return error;
2265}
2266
2267static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2268{
2269 int error = 0;
2270 struct sep_device *sep = filp->private_data;
2271
2272 dbg("------------>SEP Driver: ioctl start\n");
2273
2274 edbg("SEP Driver: cmd is %x\n", cmd);
2275
2276 switch (cmd) {
2277 case SEP_IOCSENDSEPCOMMAND:
2278 /* send command to SEP */
2279 sep_send_command_handler(sep);
2280 edbg("SEP Driver: after sep_send_command_handler\n");
2281 break;
2282 case SEP_IOCSENDSEPRPLYCOMMAND:
2283 /* send reply command to SEP */
2284 sep_send_reply_command_handler(sep);
2285 break;
2286 case SEP_IOCALLOCDATAPOLL:
2287 /* allocate data pool */
2288 error = sep_allocate_data_pool_memory_handler(sep, arg);
2289 break;
2290 case SEP_IOCWRITEDATAPOLL:
2291 /* write data into memory pool */
2292 error = sep_write_into_data_pool_handler(sep, arg);
2293 break;
2294 case SEP_IOCREADDATAPOLL:
2295 /* read data from data pool into application memory */
2296 error = sep_read_from_data_pool_handler(sep, arg);
2297 break;
2298 case SEP_IOCCREATESYMDMATABLE:
2299 /* create dma tables for synchronous operation */
2300 error = sep_create_sync_dma_tables_handler(sep, arg);
2301 break;
2302 case SEP_IOCCREATEFLOWDMATABLE:
2303 /* create flow dma tables */
2304 error = sep_create_flow_dma_tables_handler(sep, arg);
2305 break;
2306 case SEP_IOCFREEDMATABLEDATA:
2307 /* free the pages */
2308 error = sep_free_dma_table_data_handler(sep);
2309 break;
2310 case SEP_IOCSETFLOWID:
2311 /* set flow id */
2312 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2313 break;
2314 case SEP_IOCADDFLOWTABLE:
2315 /* add tables to the dynamic flow */
2316 error = sep_add_flow_tables_handler(sep, arg);
2317 break;
2318 case SEP_IOCADDFLOWMESSAGE:
2319 /* add message of add tables to flow */
2320 error = sep_add_flow_tables_message_handler(sep, arg);
2321 break;
2322 case SEP_IOCSEPSTART:
2323 /* start command to sep */
2324 error = sep_start_handler(sep);
2325 break;
2326 case SEP_IOCSEPINIT:
2327 /* init command to sep */
2328 error = sep_init_handler(sep, arg);
2329 break;
2330 case SEP_IOCGETSTATICPOOLADDR:
2331 /* get the physical and virtual addresses of the static pool */
2332 error = sep_get_static_pool_addr_handler(sep, arg);
2333 break;
2334 case SEP_IOCENDTRANSACTION:
2335 error = sep_end_transaction_handler(sep, arg);
2336 break;
2337 case SEP_IOCREALLOCCACHERES:
2338 error = sep_realloc_cache_resident_handler(sep, arg);
2339 break;
2340 case SEP_IOCGETMAPPEDADDROFFSET:
2341 error = sep_get_physical_mapped_offset_handler(sep, arg);
2342 break;
2343 case SEP_IOCGETIME:
2344 error = sep_get_time_handler(sep, arg);
2345 break;
2346 default:
2347 error = -ENOTTY;
2348 break;
2349 }
2350 dbg("SEP Driver:<-------- ioctl end\n");
2351 return error;
2352}
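/*
 * Editorial sketch of a user-space caller for one of the ioctls handled
 * above.  This is an assumption-laden example, not driver code: the
 * device node path is hypothetical (the driver only allocates the char
 * region "sep_sec_driver"; creating the node is left to udev or mknod),
 * and the field types of sep_driver_get_time_t are inferred from the
 * assignments in sep_get_time_handler().
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define SEP_IOCGETIME _IO('s', 19)	/* as in sep_driver_api.h below */

struct sep_driver_get_time_t {
	unsigned long time_value;
	unsigned long time_physical_address;
};

int main(void)
{
	struct sep_driver_get_time_t t;
	int fd = open("/dev/sep_sec_driver", O_RDWR);	/* hypothetical node */

	if (fd < 0 || ioctl(fd, SEP_IOCGETIME, &t) < 0) {
		perror("sep");
		return 1;
	}
	printf("time %lu placed at bus address %#lx\n",
	       t.time_value, t.time_physical_address);
	close(fd);
	return 0;
}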
2353
2354
2355
2356#if !SEP_DRIVER_POLLING_MODE
2357
2358/* handler for flow done interrupt */
2359
2360static void sep_flow_done_handler(struct work_struct *work)
2361{
2362 struct sep_flow_context_t *flow_data_ptr;
2363
2364 /* obtain the mutex */
2365 mutex_lock(&sep_mutex);
2366
2367 /* get the pointer to context */
2368 flow_data_ptr = (struct sep_flow_context_t *) work;
2369
2370 /* free all the current input tables in sep */
2371 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2372
2373 /* free all the current tables output tables in SEP (if needed) */
2374 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2375 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2376
2377 /* check if we have additional tables to be sent to SEP; only the
2378 input flag needs to be checked */
2379 if (flow_data_ptr->input_tables_flag) {
2380 /* copy the message to the shared RAM and signal SEP */
2381 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
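		/* FIXME (editorial): the comment above says the message is copied
		   to shared RAM, yet the arguments copy from sep->shared_addr
		   into flow_data_ptr->message - source and destination appear
		   reversed */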
2382
2383 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2384 }
2385 mutex_unlock(&sep_mutex);
2386}
2387/*
2388 interrupt handler function
2389*/
2390static irqreturn_t sep_inthandler(int irq, void *dev_id)
2391{
2392 irqreturn_t int_error;
2393 unsigned long reg_val;
2394 unsigned long flow_id;
2395 struct sep_flow_context_t *flow_context_ptr;
2396 struct sep_device *sep = dev_id;
2397
2398 int_error = IRQ_HANDLED;
2399
2400 /* read the IRR register to check if this is SEP interrupt */
2401 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2402 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2403
2404 /* check if this is the flow interrupt */
2405 if (0 /*reg_val & (0x1 << 11) */ ) {
2406 /* read GPR0 to find out which flow is done */
2407 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2408
2409 /* find the context of the flow */
2410 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2411 if (flow_context_ptr == NULL)
2412 goto end_function_with_error;
2413
2414 /* queue the work */
2415 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2416 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2417
2418 } else {
2419 /* check if this is reply interrupt from SEP */
2420 if (reg_val & (0x1 << 13)) {
2421 /* update the counter of reply messages */
2422 sep->reply_ct++;
2423 /* wake up the waiting process */
2424 wake_up(&sep_event);
2425 } else {
2426 int_error = IRQ_NONE;
2427 goto end_function;
2428 }
2429 }
2430end_function_with_error:
2431 /* clear the interrupt */
2432 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2433end_function:
2434 return int_error;
2435}
2436
2437#endif
2438
2439
2440
2441#if 0
2442
2443static void sep_wait_busy(struct sep_device *sep)
2444{
2445 u32 reg;
2446
2447 do {
2448 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2449 } while (reg);
2450}
2451
2452/*
2453 PATCH for configuring the DMA to single burst instead of multi-burst
2454*/
2455static void sep_configure_dma_burst(struct sep_device *sep)
2456{
2457#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2458
2459 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2460
2461 /* request access to registers from SEP */
2462 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2463
2464 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2465
2466 sep_wait_busy(sep);
2467
2468 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2469
2470 /* set the DMA burst register to single burst */
2471 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2472
2473 /* release the sep busy */
2474 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2475 sep_wait_busy(sep);
2476
2477 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2478
2479}
2480
2481#endif
2482
2483/*
2484 Function that is activated on the successful probe of the SEP device
2485*/
2486static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2487{
2488 int error = 0;
2489 struct sep_device *sep;
2490 int counter;
2491 int size; /* size of memory for allocation */
2492
2493 edbg("Sep pci probe starting\n");
2494 if (sep_dev != NULL) {
2495 dev_warn(&pdev->dev, "only one SEP supported.\n");
2496 return -EBUSY;
2497 }
2498
2499 /* enable the device */
2500 error = pci_enable_device(pdev);
2501 if (error) {
2502 edbg("error enabling pci device\n");
2503 goto end_function;
2504 }
2505
2506 /* set the pci dev pointer */
2507 sep_dev = &sep_instance;
2508 sep = &sep_instance;
2509
2510 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2511 /* transaction counter that coordinates the transactions between SEP
2512 and HOST */
2513 sep->send_ct = 0;
2514 /* counter for the messages from sep */
2515 sep->reply_ct = 0;
2516 /* counter for the number of bytes allocated in the pool
2517 for the current transaction */
2518 sep->data_pool_bytes_allocated = 0;
2519
2520 /* calculate the total size for allocation */
2521 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2522 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2523
2524 /* allocate the shared area */
2525 if (sep_map_and_alloc_shared_area(sep, size)) {
2526 error = -ENOMEM;
2527 /* allocation failed */
2528 goto end_function_error;
2529 }
2530 /* now set the memory regions */
2531#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2532 /* Note: this test section will need moving before it could ever
2533 work as the registers are not yet mapped ! */
2534 /* send the new SHARED MESSAGE AREA to the SEP */
2535 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2536
2537 /* poll for SEP response */
2538 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2539 while (retval != 0xffffffff && retval != sep->shared_bus)
2540 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2541
2542 /* check the return value (register) */
2543 if (retval != sep->shared_bus) {
2544 error = -ENOMEM;
2545 goto end_function_deallocate_sep_shared_area;
2546 }
2547#endif
2548 /* init the flow contexts */
2549 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2550 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2551
2552 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2553 if (sep->flow_wq == NULL) {
2554 error = -ENOMEM;
2555 edbg("sep_driver:flow queue creation failed\n");
2556 goto end_function_deallocate_sep_shared_area;
2557 }
2558 edbg("SEP Driver: create flow workqueue \n");
2559 sep->pdev = pci_dev_get(pdev);
2560
2561 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2562 if (!sep->reg_addr) {
2563 edbg("sep: ioremap of registers failed.\n");
2564 goto end_function_deallocate_sep_shared_area;
2565 }
2566 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2567
2568 /* load the rom code */
2569 sep_load_rom_code(sep);
2570
2571 /* set up system base address and shared memory location */
2572 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2573 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2574 &sep->rar_bus, GFP_KERNEL);
2575
2576 if (!sep->rar_addr) {
2577 edbg("SEP Driver:can't allocate rar\n");
2578 goto end_function_uniomap;
2579 }
2580
2581
2582 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2583 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2584
2585#if !SEP_DRIVER_POLLING_MODE
2586
2587 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2588
2589 /* clear ICR register */
2590 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2591
2592 /* set the IMR register - open only GPR 2 */
2593 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2594
2595 edbg("SEP Driver: about to call request_irq\n");
2596 /* get the interrupt line */
2597 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2598 if (error)
2599 goto end_function_free_res;
2600 return 0;
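	/* FIXME (editorial): the edbg() and second IMR write below sit after
	   the unconditional "return 0" above and are unreachable */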
2601 edbg("SEP Driver: about to write IMR REG_ADDR");
2602
2603 /* set the IMR register - open only GPR 2 */
2604 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2605
2606end_function_free_res:
2607 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2608 sep->rar_addr, sep->rar_bus);
2609#endif /* SEP_DRIVER_POLLING_MODE */
2610end_function_uniomap:
2611 iounmap(sep->reg_addr);
2612end_function_deallocate_sep_shared_area:
2613 /* de-allocate shared area */
2614 sep_unmap_and_free_shared_area(sep, size);
2615end_function_error:
2616 sep_dev = NULL;
2617end_function:
2618 return error;
2619}
2620
2621static const struct pci_device_id sep_pci_id_tbl[] = {
2622 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2623 {0}
2624};
2625
2626MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2627
2628/* field for registering driver to PCI device */
2629static struct pci_driver sep_pci_driver = {
2630 .name = "sep_sec_driver",
2631 .id_table = sep_pci_id_tbl,
2632 .probe = sep_probe
2633 /* FIXME: remove handler */
2634};
2635
2636/* major and minor device numbers */
2637static dev_t sep_devno;
2638
2639/* the files operations structure of the driver */
2640static struct file_operations sep_file_operations = {
2641 .owner = THIS_MODULE,
2642 .unlocked_ioctl = sep_ioctl,
2643 .poll = sep_poll,
2644 .open = sep_open,
2645 .release = sep_release,
2646 .mmap = sep_mmap,
2647};
2648
2649
2650/* cdev struct of the driver */
2651static struct cdev sep_cdev;
2652
2653/*
2654 this function registers the driver to the file system
2655*/
2656static int sep_register_driver_to_fs(void)
2657{
2658 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2659 if (ret_val) {
2660 edbg("sep: major number allocation failed, retval is %d\n",
2661 ret_val);
2662 return ret_val;
2663 }
2664 /* init cdev */
2665 cdev_init(&sep_cdev, &sep_file_operations);
2666 sep_cdev.owner = THIS_MODULE;
2667
2668 /* register the driver with the kernel */
2669 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2670 if (ret_val) {
2671 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2672 /* unregister dev numbers */
2673 unregister_chrdev_region(sep_devno, 1);
2674 }
2675 return ret_val;
2676}
2677
2678
2679/*--------------------------------------------------------------
2680 init function
2681----------------------------------------------------------------*/
2682static int __init sep_init(void)
2683{
2684 int ret_val = 0;
2685 dbg("SEP Driver:-------->Init start\n");
2686 /* FIXME: Probe can occur before we are ready to survive a probe */
2687 ret_val = pci_register_driver(&sep_pci_driver);
2688 if (ret_val) {
2689 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2690 goto end_function_unregister_from_fs;
2691 }
2692 /* register driver to fs */
2693 ret_val = sep_register_driver_to_fs();
2694 if (ret_val)
2695 goto end_function_unregister_pci;
2696 goto end_function;
2697end_function_unregister_pci:
2698 pci_unregister_driver(&sep_pci_driver);
2699end_function_unregister_from_fs:
2700 /* unregister from fs */
2701 cdev_del(&sep_cdev);
2702 /* unregister dev numbers */
2703 unregister_chrdev_region(sep_devno, 1);
2704end_function:
2705 dbg("SEP Driver:<-------- Init end\n");
2706 return ret_val;
2707}
2708
2709
2710/*-------------------------------------------------------------
2711 exit function
2712--------------------------------------------------------------*/
2713static void __exit sep_exit(void)
2714{
2715 int size;
2716
2717 dbg("SEP Driver:--------> Exit start\n");
2718
2719 /* unregister from fs */
2720 cdev_del(&sep_cdev);
2721 /* unregister dev numbers */
2722 unregister_chrdev_region(sep_devno, 1);
2723 /* calculate the total size for de-allocation */
2724 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2725 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2726 /* FIXME: We need to do this in the unload for the device */
2727 /* free shared area */
2728 if (sep_dev) {
2729 sep_unmap_and_free_shared_area(sep_dev, size);
2730 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2731 iounmap((void *) sep_dev->reg_addr);
2732 edbg("SEP Driver: iounmap \n");
2733 }
2734 edbg("SEP Driver: release_mem_region \n");
2735 dbg("SEP Driver:<-------- Exit end\n");
2736}
2737
2738
2739module_init(sep_init);
2740module_exit(sep_exit);
2741
2742MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 7ef16da7c4ef..000000000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
1/*
2 *
3 * sep_driver_api.h - Security Processor Driver api definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_API_H__
33#define __SEP_DRIVER_API_H__
34
35
36
37/*----------------------------------------------------------------
38 IOCTL command defines
39 -----------------------------------------------------------------*/
40
41/* magic number 1 of the sep IOCTL command */
42#define SEP_IOC_MAGIC_NUMBER 's'
43
44/* sends interrupt to sep that message is ready */
45#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
46
47/* sends interrupt to sep that the reply message is ready */
48#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
49
50/* allocate memory in data pool */
51#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
52
53/* write to pre-allocated memory in data pool */
54#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
55
56/* read from pre-allocated memory in data pool */
57#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
58
59/* create sym dma lli tables */
60#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
61
62/* create flow dma lli tables */
63#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
64
65/* free dynamic data allocated during table creation */
66#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
67
68/* get the static pool area addresses (physical and virtual) */
69#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
70
71/* set flow id command */
72#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
73
74/* add tables to the dynamic flow */
75#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
76
77/* add flow add tables message */
78#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
79
80/* start sep command */
81#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
82
83/* init sep command */
84#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
85
86/* end transaction command */
87#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
88
89/* reallocate cache and resident */
90#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
91
92/* get the offset of the address starting from the beginning of the map area */
93#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
94
95/* get time address and value */
96#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
97
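For illustration, a minimal user-space sketch of driving this interface; the /dev node name is an assumption (the header does not name it), and only the magic/number pairs above are taken from the source:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ioctl.h>

	#define SEP_IOC_MAGIC_NUMBER 's'
	#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER, 0)

	int main(void)
	{
		/* "/dev/sep" is a guess at the node registered via cdev_add() */
		int fd = open("/dev/sep", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* tell SEP that a message is ready in the shared message area */
		if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) < 0)
			perror("ioctl");
		close(fd);
		return 0;
	}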
98/*-------------------------------------------
99 TYPEDEFS
100----------------------------------------------*/
101
102/*
103 init command struct
104*/
105struct sep_driver_init_t {
106 /* start of the 1G region of host memory that SEP can access */
107 unsigned long message_addr;
108
109 /* message size in words */
110 unsigned long message_size_in_words;
111
112};
113
114
115/*
116 realloc cache resident command
117*/
118struct sep_driver_realloc_cache_resident_t {
119 /* new cache address */
120 u64 new_cache_addr;
121 /* new resident address */
122 u64 new_resident_addr;
123 /* new shared area address */
124 u64 new_shared_area_addr;
125 /* new base address */
126 u64 new_base_addr;
127};
128
129struct sep_driver_alloc_t {
130 /* offset of the allocated space from the start of the shared area */
131 unsigned long offset;
132
133 /* physical address of allocated space */
134 unsigned long phys_address;
135
136 /* number of bytes to allocate */
137 unsigned long num_bytes;
138};
139
140/*
141 data pool write command struct */
142struct sep_driver_write_t {
143 /* application space address */
144 unsigned long app_address;
145
146 /* address of the data pool */
147 unsigned long datapool_address;
148
149 /* number of bytes to write */
150 unsigned long num_bytes;
151};
152
153/*
154 data pool read command struct */
155struct sep_driver_read_t {
156 /* application space address */
157 unsigned long app_address;
158
159 /* address of the data pool */
160 unsigned long datapool_address;
161
162 /* number of bytes to read */
163 unsigned long num_bytes;
164};
165
166/*
167 sym dma table build command struct */
168struct sep_driver_build_sync_table_t {
169 /* address value of the data in */
170 unsigned long app_in_address;
171
172 /* size of data in */
173 unsigned long data_in_size;
174
175 /* address of the data out */
176 unsigned long app_out_address;
177
178 /* the size of the block of the operation - if needed,
179 every table will be modulo this parameter */
180 unsigned long block_size;
181
182 /* the physical address of the first input DMA table */
183 unsigned long in_table_address;
184
185 /* number of entries in the first input DMA table */
186 unsigned long in_table_num_entries;
187
188 /* the physical address of the first output DMA table */
189 unsigned long out_table_address;
190
191 /* number of entries in the first output DMA table */
192 unsigned long out_table_num_entries;
193
194 /* data in the first input table */
195 unsigned long table_data_size;
196
197 /* set if the given addresses are kernel virtual, not user space */
198 bool isKernelVirtualAddress;
199
200};
201
202/*
203 flow dma table build command struct */
204struct sep_driver_build_flow_table_t {
205 /* flow type */
206 unsigned long flow_type;
207
208 /* flag for input output */
209 unsigned long input_output_flag;
210
211 /* address value of the data in */
212 unsigned long virt_buff_data_addr;
213
214 /* size of data in */
215 unsigned long num_virtual_buffers;
216
217 /* the physical address of the first input DMA table */
218 unsigned long first_table_addr;
219
220 /* number of entries in the first input DMA table */
221 unsigned long first_table_num_entries;
222
223 /* data in the first input table */
224 unsigned long first_table_data_size;
225
226 /* set if the given addresses are kernel virtual, not user space */
227 bool isKernelVirtualAddress;
228};
229
230
231struct sep_driver_add_flow_table_t {
232 /* flow id */
233 unsigned long flow_id;
234
235 /* flag for input output */
236 unsigned long inputOutputFlag;
237
238 /* address value of the data in */
239 unsigned long virt_buff_data_addr;
240
241 /* size of data in */
242 unsigned long num_virtual_buffers;
243
244 /* address of the first table */
245 unsigned long first_table_addr;
246
247 /* number of entries in the first table */
248 unsigned long first_table_num_entries;
249
250 /* data size of the first table */
251 unsigned long first_table_data_size;
252
253 /* set if the given addresses are kernel virtual, not user space */
254 bool isKernelVirtualAddress;
255
256};
257
258/*
259 command struct for set flow id
260*/
261struct sep_driver_set_flow_id_t {
262 /* flow id to set */
263 unsigned long flow_id;
264};
265
266
267/* command struct for add tables message */
268struct sep_driver_add_message_t {
269 /* flow id to set */
270 unsigned long flow_id;
271
272 /* message size in bytes */
273 unsigned long message_size_in_bytes;
274
275 /* address of the message */
276 unsigned long message_address;
277};
278
279/* command struct for static pool addresses */
280struct sep_driver_static_pool_addr_t {
281 /* physical address of the static pool */
282 unsigned long physical_static_address;
283
284 /* virtual address of the static pool */
285 unsigned long virtual_static_address;
286};
287
288/* command struct for getting the offset of the physical address from
289 the start of the mapped area */
290struct sep_driver_get_mapped_offset_t {
291 /* physical address to look up */
292 unsigned long physical_address;
293
294 /* offset of that address from the start of the mapped area */
295 unsigned long offset;
296};
297
298/* command struct for getting time value and address */
299struct sep_driver_get_time_t {
300 /* physical address of stored time */
301 unsigned long time_physical_address;
302
303 /* value of the stored time */
304 unsigned long time_value;
305};
306
307
308/*
309 structure that represent one entry in the DMA LLI table
310*/
311struct sep_lli_entry_t {
312 /* physical address */
313 unsigned long physical_address;
314
315 /* block size */
316 unsigned long block_size;
317};
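Taken together with the SEP_NUM_ENTRIES_* and SEP_TABLE_DATA_SIZE_MASK defines in sep_driver_config.h (later in this patch), this suggests the block_size word of a table's first entry doubles as a packed descriptor. A small sketch of that packing; this is an inference from the masks, not code taken from the driver:

	#include <stdio.h>

	#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
	#define SEP_INT_FLAG_OFFSET_IN_BITS    31
	#define SEP_TABLE_DATA_SIZE_MASK       0xFFFFFF
	#define SEP_NUM_ENTRIES_MASK           0x7F

	int main(void)
	{
		unsigned long word = 0;
		unsigned long num_entries = 8;      /* SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP */
		unsigned long data_size   = 0x1000; /* example table data size */

		word |= data_size & SEP_TABLE_DATA_SIZE_MASK;
		word |= (num_entries & SEP_NUM_ENTRIES_MASK)
				<< SEP_NUM_ENTRIES_OFFSET_IN_BITS;
		word |= 1UL << SEP_INT_FLAG_OFFSET_IN_BITS; /* interrupt on completion */

		printf("packed word: 0x%lx\n", word);
		printf("entries=%lu size=0x%lx\n",
		       (word >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK,
		       word & SEP_TABLE_DATA_SIZE_MASK);
		return 0;
	}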
318
319/*
320 structure that represents data needed for lli table construction
321*/
322struct sep_lli_prepare_table_data_t {
323 /* pointer to the memory where the first lli entry to be built */
324 struct sep_lli_entry_t *lli_entry_ptr;
325
326 /* pointer to the array of lli entries from which the table is to be built */
327 struct sep_lli_entry_t *lli_array_ptr;
328
329 /* number of elements in lli array */
330 int lli_array_size;
331
332 /* number of entries in the created table */
333 int num_table_entries;
334
335 /* number of array entries processed during table creation */
336 int num_array_entries_processed;
337
338 /* the total data size in the created table */
339 int lli_table_total_data_size;
340};
341
342/*
343 structure that represents one table - it is not used in the code, just
344 to show what a table looks like
345*/
346struct sep_lli_table_t {
347 /* number of pages mapped in this table. If 0, the table
348 is not defined (used as a valid flag) */
349 unsigned long num_pages;
350 /*
351 pointer to array of page pointers that represent the mapping of the
352 virtual buffer defined by the table to the physical memory. If this
353 pointer is NULL, it means that the table is not defined
354 (used as a valid flag)
355 */
356 struct page **table_page_array_ptr;
357
358 /* maximum flow entries in table */
359 struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
360};
361
362
363/*
364 structure for keeping the mapping of the virtual buffer into physical pages
365*/
366struct sep_flow_buffer_data {
367 /* pointer to the array of page structs pointers to the pages of the
368 virtual buffer */
369 struct page **page_array_ptr;
370
371 /* number of pages taken by the virtual buffer */
372 unsigned long num_pages;
373
374 /* this flag signals if this page_array is the last one among many that were
375 sent in one setting to SEP */
376 unsigned long last_page_array_flag;
377};
378
379/*
380 struct that keeps all the data for one flow
381*/
382struct sep_flow_context_t {
383 /*
384 work struct for handling the flow done interrupt in the workqueue
385 this structure must be placed first, since it is used
386 for casting to the containing flow context
387 */
388 struct work_struct flow_wq;
389
390 /* flow id */
391 unsigned long flow_id;
392
393 /* additional input tables exist */
394 unsigned long input_tables_flag;
395
396 /* additional output tables exist */
397 unsigned long output_tables_flag;
398
399 /* data of the first input table */
400 struct sep_lli_entry_t first_input_table;
401
402 /* data of the first output table */
403 struct sep_lli_entry_t first_output_table;
404
405 /* last input table data */
406 struct sep_lli_entry_t last_input_table;
407
408 /* last output table data */
409 struct sep_lli_entry_t last_output_table;
410
411 /* input tables in process (in sep) */
412 struct sep_lli_entry_t input_tables_in_process;
413
414 /* output table in process (in sep) */
415 struct sep_lli_entry_t output_tables_in_process;
416
417 /* size of messages in bytes */
418 unsigned long message_size_in_bytes;
419
420 /* message */
421 unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
422};
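The comment above flow_wq describes the usual embedded-work pattern. A hedged sketch of the handler side, with a hypothetical handler name (the real one lived in the deleted sep_driver.c); note that container_of() works wherever the member sits, so the "must be first" restriction only matters if the code casts the pointer directly:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	/* hypothetical handler name, for illustration only */
	static void sep_flow_done_handler(struct work_struct *work)
	{
		struct sep_flow_context_t *flow_ctx =
			container_of(work, struct sep_flow_context_t, flow_wq);

		/* a direct cast, (struct sep_flow_context_t *)work, is what
		 * actually requires flow_wq to be the first member */
		(void)flow_ctx->flow_id;
	}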
423
424
425#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5eca09..000000000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
1/*
2 *
3 * sep_driver_config.h - Security Processor Driver configuration
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_CONFIG_H__
33#define __SEP_DRIVER_CONFIG_H__
34
35
36/*--------------------------------------
37 DRIVER CONFIGURATION FLAGS
38 -------------------------------------*/
39
40/* if flag is on, then the driver is running in polling and
41 not interrupt mode */
42#define SEP_DRIVER_POLLING_MODE 1
43
44/* flag which defines if the shared area address should be
45 reconfigured (sent to SEP anew) during init of the driver */
46#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
47
48/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
49#define SEP_DRIVER_ARM_DEBUG_MODE 0
50
51/*-------------------------------------------
52 INTERNAL DATA CONFIGURATION
53 -------------------------------------------*/
54
55/* flag for the input array */
56#define SEP_DRIVER_IN_FLAG 0
57
58/* flag for output array */
59#define SEP_DRIVER_OUT_FLAG 1
60
61/* maximum number of entries in one LLI table */
62#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
63
64
65/*--------------------------------------------------------
66 SHARED AREA memory total size is 36K
67 it is divided as follows:
68
69 SHARED_MESSAGE_AREA 8K }
70 }
71 STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
72 }
73 DATA_POOL_AREA 12K }
74
75 SYNCHRONIC_DMA_TABLES_AREA 5K
76
77 FLOW_DMA_TABLES_AREA 4K
78
79 SYSTEM_MEMORY_AREA 3k
80
81 SYSTEM_MEMORY total size is 3k
82 it is divided as follows:
83
84 TIME_MEMORY_AREA 8B
85-----------------------------------------------------------*/
86
87
88
89/*
90 the maximum length of the message - the rest of the message shared
91 area will be dedicated to the dma lli tables
92*/
93#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
94
95/* the size of the message shared area in bytes */
96#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
97
98/* the size of the static pool area in bytes */
99#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
100
101/* the size of the data pool shared area in bytes */
102#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
103
104/* the size of the synchronic dma tables area in bytes */
105#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
106
107
108/* the size of the flow dma tables area in bytes */
109#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
110
111/* system data (time, caller id etc.) pool */
112#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
113
114
115/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
116 DATA POOL areas. The area must be a multiple of 4K */
117#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
118
119
120/*-----------------------------------------------
121 offsets of the areas starting from the shared area start address
122*/
123
124/* message area offset */
125#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
126
127/* static pool area offset */
128#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
129 (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
130
131/* data pool area offset */
132#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
133 (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
134 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
135
136/* synchronic dma tables area offset */
137#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
138 (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
139 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
140
141/* sep driver flow dma tables area offset */
142#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
143 (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
144 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
145
146/* system memory offset in bytes */
147#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
148 (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
149 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
150
151/* offset of the time area */
152#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
153 (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
154
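As a quick cross-check of the layout pictured earlier, a standalone sketch (ordinary user-space C, not driver code) that accumulates the sizes in the same order the offset macros do:

	#include <stdio.h>

	int main(void)
	{
		unsigned long off = 0;

		printf("message  @ %5lu\n", off);  off += 8 * 1024;  /* 8K  */
		printf("static   @ %5lu\n", off);  off += 4 * 1024;  /* 4K  */
		printf("pool     @ %5lu\n", off);  off += 12 * 1024; /* 12K: mapped area now 24K */
		printf("sync dma @ %5lu\n", off);  off += 5 * 1024;  /* 5K  */
		printf("flow dma @ %5lu\n", off);  off += 4 * 1024;  /* 4K  */
		printf("system   @ %5lu\n", off);  /* 33K from the shared area start */
		return 0;
	}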
155
156
157/* start physical address of the SEP registers memory in HOST */
158#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
159
160/* size of the SEP registers memory region in HOST (for now 100 registers) */
161#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
162
163/* define the number of IRQ for SEP interrupts */
164#define SEP_DIRVER_IRQ_NUM 1
165
166/* maximum number of add buffers */
167#define SEP_MAX_NUM_ADD_BUFFERS 100
168
169/* number of flows */
170#define SEP_DRIVER_NUM_FLOWS 4
171
172/* maximum number of entries in flow table */
173#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
174
175/* offset of the num entries in the block length entry of the LLI */
176#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
177
178/* offset of the interrupt flag in the block length entry of the LLI */
179#define SEP_INT_FLAG_OFFSET_IN_BITS 31
180
181/* mask for extracting data size from LLI */
182#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
183
184/* mask for entries after being shifted left */
185#define SEP_NUM_ENTRIES_MASK 0x7F
186
187/* default flow id */
188#define SEP_FREE_FLOW_ID 0xFFFFFFFF
189
190/* temp flow id used during creation of a new flow until receiving
191 the real flow id from sep */
192#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
193
194/* maximum add buffers message length in bytes */
195#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
196
197/* maximum number of concurrent virtual buffers */
198#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
199
200/* the token that defines the start of time address */
201#define SEP_TIME_VAL_TOKEN 0x12345678
202
203/* DEBUG LEVEL MASKS */
204#define SEP_DEBUG_LEVEL_BASIC 0x1
205
206#define SEP_DEBUG_LEVEL_EXTENDED 0x4
207
208
209/* Debug helpers */
210
211#define dbg(fmt, args...) \
212do {\
213 if (debug & SEP_DEBUG_LEVEL_BASIC) \
214 printk(KERN_DEBUG fmt, ##args); \
215} while (0)
216
217#define edbg(fmt, args...) \
218do { \
219 if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
220 printk(KERN_DEBUG fmt, ##args); \
221} while (0)
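Usage sketch for the two macros; it assumes `debug` is a module parameter holding a bitmask of the levels above (the variable itself was defined in the driver source, not in this header):

	#include <linux/module.h>

	static int debug = SEP_DEBUG_LEVEL_BASIC | SEP_DEBUG_LEVEL_EXTENDED;
	module_param(debug, int, 0644);

	static void debug_demo(void)
	{
		dbg("SEP Driver: basic trace\n");	/* needs the BASIC bit */
		edbg("SEP Driver: verbose trace\n");	/* needs the EXTENDED bit */
	}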
222
223
224
225#endif
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8a14b4..000000000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 *
3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef SEP_DRIVER_HW_DEFS__H
33#define SEP_DRIVER_HW_DEFS__H
34
35/*--------------------------------------------------------------------------*/
36/* Abstract: HW Registers Defines. */
37/* */
38/* Note: This file was automatically created !!! */
39/* DO NOT EDIT THIS FILE !!! */
40/*--------------------------------------------------------------------------*/
41
42
43/* cf registers */
44#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
45#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
46#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
47#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
48#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
49#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
50#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
51#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
52#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
53#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
54#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
55#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
56#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
57#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
58#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
59#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
60#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
61#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
62#define HW_R3B_REG_ADDR 0x00C0UL
63#define HW_R4B_REG_ADDR 0x0100UL
64#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
65#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
66#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
67#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
68#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
69#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
70#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
71#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
72#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
73#define HW_CSA_REG_ADDR 0x0140UL
74#define HW_SINB_REG_ADDR 0x0180UL
75#define HW_SOUTB_REG_ADDR 0x0184UL
76#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
77#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
78#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
79#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
80#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
81#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
82#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
83#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
84#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
85#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
86#define HW_PKI_CLR_REG_ADDR 0x01E8UL
87#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
88#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
89#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
90#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
91#define HW_DES_KEY_0_REG_ADDR 0x0208UL
92#define HW_DES_KEY_1_REG_ADDR 0x020CUL
93#define HW_DES_KEY_2_REG_ADDR 0x0210UL
94#define HW_DES_KEY_3_REG_ADDR 0x0214UL
95#define HW_DES_KEY_4_REG_ADDR 0x0218UL
96#define HW_DES_KEY_5_REG_ADDR 0x021CUL
97#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
98#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
99#define HW_DES_IV_0_REG_ADDR 0x0228UL
100#define HW_DES_IV_1_REG_ADDR 0x022CUL
101#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
102#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
103#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
104#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
105#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
106#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
107#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
108#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
109#define HW_AES_KEY_0_REG_ADDR 0x0400UL
110#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
111#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
112#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
113#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
114#define HW_AES_IV_0_REG_ADDR 0x0440UL
115#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
116#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
117#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
118#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
119#define HW_AES_CTR1_REG_ADDR 0x0460UL
120#define HW_AES_SK_REG_ADDR 0x0478UL
121#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
122#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
123#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
124#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
125#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
126#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
127#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
128#define HW_HASH_H0_REG_ADDR 0x0640UL
129#define HW_HASH_H1_REG_ADDR 0x0644UL
130#define HW_HASH_H2_REG_ADDR 0x0648UL
131#define HW_HASH_H3_REG_ADDR 0x064CUL
132#define HW_HASH_H4_REG_ADDR 0x0650UL
133#define HW_HASH_H5_REG_ADDR 0x0654UL
134#define HW_HASH_H6_REG_ADDR 0x0658UL
135#define HW_HASH_H7_REG_ADDR 0x065CUL
136#define HW_HASH_H8_REG_ADDR 0x0660UL
137#define HW_HASH_H9_REG_ADDR 0x0664UL
138#define HW_HASH_H10_REG_ADDR 0x0668UL
139#define HW_HASH_H11_REG_ADDR 0x066CUL
140#define HW_HASH_H12_REG_ADDR 0x0670UL
141#define HW_HASH_H13_REG_ADDR 0x0674UL
142#define HW_HASH_H14_REG_ADDR 0x0678UL
143#define HW_HASH_H15_REG_ADDR 0x067CUL
144#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
145#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
146#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
147#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
148#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
149#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
150#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
151#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
152#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
153#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
154#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
155#define HW_HASH_DATA_REG_ADDR 0x07ECUL
156#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
157#define HW_DRNG_VALID_REG_ADDR 0x0804UL
158#define HW_DRNG_DATA_REG_ADDR 0x0808UL
159#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
160#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
161#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
162#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
163#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
164#define HW_CLK_STATUS_REG_ADDR 0x0824UL
165#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
166#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
167#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
168#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
169#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
170#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
171#define HW_AES_BUSY_REG_ADDR 0x0914UL
172#define HW_DES_BUSY_REG_ADDR 0x0918UL
173#define HW_HASH_BUSY_REG_ADDR 0x091CUL
174#define HW_CONTENT_REG_ADDR 0x0924UL
175#define HW_VERSION_REG_ADDR 0x0928UL
176#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
177#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
178#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
179#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
180#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
181#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
182#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
183#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
184#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
185#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
186#define HW_OLD_DATA_REG_ADDR 0x0C48UL
187#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
188#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
189#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
190#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
191#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
192#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
193#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
194#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
195#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
196#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
197#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
198#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
199#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
200#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
201#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
202#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
203#define HW_HOST_IRR_REG_ADDR 0x0A00UL
204#define HW_HOST_IMR_REG_ADDR 0x0A04UL
205#define HW_HOST_ICR_REG_ADDR 0x0A08UL
206#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
207#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
208#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
209#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
210#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
211#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
212#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
213#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
214#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
215#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
216#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
217#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
218#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
219#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
220#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
221#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
222#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
223#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
224#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
225#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
226#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
227#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
228#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
229#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
230#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
231
232#endif /* ifndef HW_DEFS */
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
index 5e2ffefb60af..d231ae27299d 100644
--- a/drivers/staging/spectra/Kconfig
+++ b/drivers/staging/spectra/Kconfig
@@ -2,6 +2,7 @@
 menuconfig SPECTRA
 	tristate "Denali Spectra Flash Translation Layer"
 	depends on BLOCK
+	depends on X86_MRST
 	default n
 	---help---
 	  Enable the FTL pseudo-filesystem used with the NAND Flash
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index d0c5c97eda3e..fa21a0fd8e84 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -27,6 +27,8 @@
 #include <linux/kthread.h>
 #include <linux/log2.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>

 /**** Helper functions used for Div, Remainder operation on u64 ****/

@@ -113,7 +115,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)

 #define GLOB_SBD_NAME		"nd"
 #define GLOB_SBD_IRQ_NUM	(29)
-#define GLOB_VERSION		"driver version 20091110"

 #define GLOB_SBD_IOCTL_GC (0x7701)
 #define GLOB_SBD_IOCTL_WL (0x7702)
@@ -272,13 +273,6 @@ static int get_res_blk_num_os(void)
 	return res_blks;
 }

-static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	/* rq->timeout = 5 * HZ; */
-	rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* Transfer a full request. */
 static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
 {
@@ -296,8 +290,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
 			IdentifyDeviceData.PagesPerBlock *
 			res_blks_os;

-	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-			req->cmd[0] == REQ_LB_OP_FLUSH) {
+	if (req->cmd_type & REQ_FLUSH) {
 		if (force_flush_cache()) /* Fail to flush cache */
 			return -EIO;
 		else
@@ -597,11 +590,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
 	return -ENOTTY;
 }

+int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
+		unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 static struct block_device_operations GLOB_SBD_ops = {
 	.owner = THIS_MODULE,
 	.open = GLOB_SBD_open,
 	.release = GLOB_SBD_release,
-	.locked_ioctl = GLOB_SBD_ioctl,
+	.ioctl = GLOB_SBD_unlocked_ioctl,
 	.getgeo = GLOB_SBD_getgeo,
 };

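The hunk above follows the standard pattern for the .locked_ioctl removal: the block layer no longer takes the big kernel lock around ->ioctl, so a driver not yet audited for its own locking wraps the old handler and takes the lock explicitly. A user-space sketch of the shape of that wrapper, with a pthread mutex standing in for the BKL:

	#include <pthread.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* BKL stand-in */

	static int sbd_ioctl(unsigned int cmd, unsigned long arg)
	{
		return 0;	/* original handler body, written assuming the lock is held */
	}

	static int sbd_unlocked_ioctl(unsigned int cmd, unsigned long arg)
	{
		int ret;

		pthread_mutex_lock(&big_lock);	/* the wrapper now takes the lock itself */
		ret = sbd_ioctl(cmd, arg);
		pthread_mutex_unlock(&big_lock);
		return ret;
	}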
@@ -650,8 +655,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
 	/* Here we force report 512 byte hardware sector size to Kernel */
 	blk_queue_logical_block_size(dev->queue, 512);

-	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-			SBD_prepare_flush);
+	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);

 	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
 	if (IS_ERR(dev->thread)) {
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
index 134aa5166a8d..9b5218b6ada8 100644
--- a/drivers/staging/spectra/flash.c
+++ b/drivers/staging/spectra/flash.c
@@ -61,7 +61,6 @@ static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
 static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
 				u8 cache_blk, u16 flag);
 static int FTL_Cache_Write(void);
-static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
 static void FTL_Calculate_LRU(void);
 static u32 FTL_Get_Block_Index(u32 wBlockNum);

@@ -86,8 +85,6 @@ static u32 FTL_Replace_MWBlock(void);
 static int FTL_Replace_Block(u64 blk_addr);
 static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);

-static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
-
 struct device_info_tag DeviceInfo;
 struct flash_cache_tag Cache;
 static struct spectra_l2_cache_info cache_l2;
@@ -775,7 +772,7 @@ static void dump_cache_l2_table(void)
 {
 	struct list_head *p;
 	struct spectra_l2_cache_list *pnd;
-	int n, i;
+	int n;

 	n = 0;
 	list_for_each(p, &cache_l2.table.list) {
@@ -1538,79 +1535,6 @@ static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
 }

 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
1541* Function: FTL_Cache_Update_Block
1542* Inputs: pointer to buffer,page address,block address
1543* Outputs: PASS=0 / FAIL=1
1544* Description: It updates the cache
1545*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
1546static int FTL_Cache_Update_Block(u8 *pData,
1547 u64 old_page_addr, u64 blk_addr)
1548{
1549 int i, j;
1550 u8 *buf = pData;
1551 int wResult = PASS;
1552 int wFoundInCache;
1553 u64 page_addr;
1554 u64 addr;
1555 u64 old_blk_addr;
1556 u16 page_offset;
1557
1558 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
1559 __FILE__, __LINE__, __func__);
1560
1561 old_blk_addr = (u64)(old_page_addr >>
1562 DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
1563 page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
1564 DeviceInfo.nBitsInPageDataSize);
1565
1566 for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
1567 page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
1568 if (i != page_offset) {
1569 wFoundInCache = FAIL;
1570 for (j = 0; j < CACHE_ITEM_NUM; j++) {
1571 addr = Cache.array[j].address;
1572 addr = FTL_Get_Physical_Block_Addr(addr) +
1573 GLOB_u64_Remainder(addr, 2);
1574 if ((addr >= page_addr) && addr <
1575 (page_addr + Cache.cache_item_size)) {
1576 wFoundInCache = PASS;
1577 buf = Cache.array[j].buf;
1578 Cache.array[j].changed = SET;
1579#if CMD_DMA
1580#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
1581 int_cache[ftl_cmd_cnt].item = j;
1582 int_cache[ftl_cmd_cnt].cache.address =
1583 Cache.array[j].address;
1584 int_cache[ftl_cmd_cnt].cache.changed =
1585 Cache.array[j].changed;
1586#endif
1587#endif
1588 break;
1589 }
1590 }
1591 if (FAIL == wFoundInCache) {
1592 if (ERR == FTL_Cache_Read_All(g_pTempBuf,
1593 page_addr)) {
1594 wResult = FAIL;
1595 break;
1596 }
1597 buf = g_pTempBuf;
1598 }
1599 } else {
1600 buf = pData;
1601 }
1602
1603 if (FAIL == FTL_Cache_Write_All(buf,
1604 blk_addr + (page_addr - old_blk_addr))) {
1605 wResult = FAIL;
1606 break;
1607 }
1608 }
1609
1610 return wResult;
1611}
1612
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function: FTL_Copy_Block
 * Inputs: source block address
 *         Destination block address
@@ -1698,7 +1622,7 @@ static int get_l2_cache_blks(void)
 static int erase_l2_cache_blocks(void)
 {
 	int i, ret = PASS;
-	u32 pblk, lblk;
+	u32 pblk, lblk = BAD_BLOCK;
 	u64 addr;
 	u32 *pbt = (u32 *)g_pBlockTable;

@@ -2004,87 +1928,6 @@ static int search_l2_cache(u8 *buf, u64 logical_addr)
 	return ret;
 }

2007/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
2008* Function: FTL_Cache_Write_Back
2009* Inputs: pointer to data cached in sys memory
2010* address of free block in flash
2011* Outputs: PASS=0 / FAIL=1
2012* Description: writes all the pages of Cache Block to flash
2013*
2014*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
2015static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
2016{
2017 int i, j, iErase;
2018 u64 old_page_addr, addr, phy_addr;
2019 u32 *pbt = (u32 *)g_pBlockTable;
2020 u32 lba;
2021
2022 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
2023 __FILE__, __LINE__, __func__);
2024
2025 old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
2026 GLOB_u64_Remainder(blk_addr, 2);
2027
2028 iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;
2029
2030 pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);
2031
2032#if CMD_DMA
2033 p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
2034 g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
2035
2036 p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
2037 p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
2038 DeviceInfo.nBitsInBlockDataSize);
2039 p_BTableChangesDelta->BT_Entry_Value =
2040 pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
2041 p_BTableChangesDelta->ValidFields = 0x0C;
2042#endif
2043
2044 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
2045 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2046 FTL_Write_IN_Progress_Block_Table_Page();
2047 }
2048
2049 for (i = 0; i < RETRY_TIMES; i++) {
2050 if (PASS == iErase) {
2051 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2052 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
2053 lba = BLK_FROM_ADDR(blk_addr);
2054 MARK_BLOCK_AS_BAD(pbt[lba]);
2055 i = RETRY_TIMES;
2056 break;
2057 }
2058 }
2059
2060 for (j = 0; j < CACHE_ITEM_NUM; j++) {
2061 addr = Cache.array[j].address;
2062 if ((addr <= blk_addr) &&
2063 ((addr + Cache.cache_item_size) > blk_addr))
2064 cache_block_to_write = j;
2065 }
2066
2067 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
2068 if (PASS == FTL_Cache_Update_Block(pData,
2069 old_page_addr, phy_addr)) {
2070 cache_block_to_write = UNHIT_CACHE_ITEM;
2071 break;
2072 } else {
2073 iErase = PASS;
2074 }
2075 }
2076
2077 if (i >= RETRY_TIMES) {
2078 if (ERR == FTL_Flash_Error_Handle(pData,
2079 old_page_addr, blk_addr))
2080 return ERR;
2081 else
2082 return FAIL;
2083 }
2084
2085 return PASS;
2086}
2087
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function: FTL_Cache_Write_Page
 * Inputs: Pointer to buffer, page address, cache block number
@@ -2370,159 +2213,6 @@ static int FTL_Write_Block_Table(int wForce)
 	return 1;
 }

2373/******************************************************************
2374* Function: GLOB_FTL_Flash_Format
2375* Inputs: none
2376* Outputs: PASS
2377* Description: The block table stores bad block info, including MDF+
2378* blocks gone bad over the ages. Therefore, if we have a
2379* block table in place, then use it to scan for bad blocks
2380* If not, then scan for MDF.
2381* Now, a block table will only be found if spectra was already
2382* being used. For a fresh flash, we'll go thru scanning for
2383* MDF. If spectra was being used, then there is a chance that
2384* the MDF has been corrupted. Spectra avoids writing to the
2385* first 2 bytes of the spare area to all pages in a block. This
2386* covers all known flash devices. However, since flash
2387* manufacturers have no standard of where the MDF is stored,
2388* this cannot guarantee that the MDF is protected for future
2389* devices too. The initial scanning for the block table assures
2390* this. It is ok even if the block table is outdated, as all
2391* we're looking for are bad block markers.
2392* Use this when mounting a file system or starting a
2393* new flash.
2394*
2395*********************************************************************/
2396static int FTL_Format_Flash(u8 valid_block_table)
2397{
2398 u32 i, j;
2399 u32 *pbt = (u32 *)g_pBlockTable;
2400 u32 tempNode;
2401 int ret;
2402
2403#if CMD_DMA
2404 u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
2405 if (ftl_cmd_cnt)
2406 return FAIL;
2407#endif
2408
2409 if (FAIL == FTL_Check_Block_Table(FAIL))
2410 valid_block_table = 0;
2411
2412 if (valid_block_table) {
2413 u8 switched = 1;
2414 u32 block, k;
2415
2416 k = DeviceInfo.wSpectraStartBlock;
2417 while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
2418 switched = 0;
2419 k++;
2420 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2421 j <= DeviceInfo.wSpectraEndBlock;
2422 j++, i++) {
2423 block = (pbt[i] & ~BAD_BLOCK) -
2424 DeviceInfo.wSpectraStartBlock;
2425 if (block != i) {
2426 switched = 1;
2427 tempNode = pbt[i];
2428 pbt[i] = pbt[block];
2429 pbt[block] = tempNode;
2430 }
2431 }
2432 }
2433 if ((k == DeviceInfo.wSpectraEndBlock) && switched)
2434 valid_block_table = 0;
2435 }
2436
2437 if (!valid_block_table) {
2438 memset(g_pBlockTable, 0,
2439 DeviceInfo.wDataBlockNum * sizeof(u32));
2440 memset(g_pWearCounter, 0,
2441 DeviceInfo.wDataBlockNum * sizeof(u8));
2442 if (DeviceInfo.MLCDevice)
2443 memset(g_pReadCounter, 0,
2444 DeviceInfo.wDataBlockNum * sizeof(u16));
2445#if CMD_DMA
2446 memset(g_pBTStartingCopy, 0,
2447 DeviceInfo.wDataBlockNum * sizeof(u32));
2448 memset(g_pWearCounterCopy, 0,
2449 DeviceInfo.wDataBlockNum * sizeof(u8));
2450 if (DeviceInfo.MLCDevice)
2451 memset(g_pReadCounterCopy, 0,
2452 DeviceInfo.wDataBlockNum * sizeof(u16));
2453#endif
2454 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2455 j <= DeviceInfo.wSpectraEndBlock;
2456 j++, i++) {
2457 if (GLOB_LLD_Get_Bad_Block((u32)j))
2458 pbt[i] = (u32)(BAD_BLOCK | j);
2459 }
2460 }
2461
2462 nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");
2463
2464 for (j = DeviceInfo.wSpectraStartBlock, i = 0;
2465 j <= DeviceInfo.wSpectraEndBlock;
2466 j++, i++) {
2467 if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
2468 ret = GLOB_LLD_Erase_Block(j);
2469 if (FAIL == ret) {
2470 pbt[i] = (u32)(j);
2471 MARK_BLOCK_AS_BAD(pbt[i]);
2472 nand_dbg_print(NAND_DBG_WARN,
2473 "NAND Program fail in %s, Line %d, "
2474 "Function: %s, new Bad Block %d generated!\n",
2475 __FILE__, __LINE__, __func__, (int)j);
2476 } else {
2477 pbt[i] = (u32)(SPARE_BLOCK | j);
2478 }
2479 }
2480#if CMD_DMA
2481 pbtStartingCopy[i] = pbt[i];
2482#endif
2483 }
2484
2485 g_wBlockTableOffset = 0;
2486 for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
2487 DeviceInfo.wSpectraStartBlock))
2488 && ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
2489 ;
2490 if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
2491 printk(KERN_ERR "All blocks bad!\n");
2492 return FAIL;
2493 } else {
2494 g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
2495 if (i != BLOCK_TABLE_INDEX) {
2496 tempNode = pbt[i];
2497 pbt[i] = pbt[BLOCK_TABLE_INDEX];
2498 pbt[BLOCK_TABLE_INDEX] = tempNode;
2499 }
2500 }
2501 pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2502
2503#if CMD_DMA
2504 pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
2505#endif
2506
2507 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
2508 memset(g_pBTBlocks, 0xFF,
2509 (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
2510 g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
2511 FTL_Write_Block_Table(FAIL);
2512
2513 for (i = 0; i < CACHE_ITEM_NUM; i++) {
2514 Cache.array[i].address = NAND_CACHE_INIT_ADDR;
2515 Cache.array[i].use_cnt = 0;
2516 Cache.array[i].changed = CLEAR;
2517 }
2518
2519#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
2520 memcpy((void *)&cache_start_copy, (void *)&Cache,
2521 sizeof(struct flash_cache_tag));
2522#endif
2523 return PASS;
2524}
2525
 static int force_format_nand(void)
 {
 	u32 i;
@@ -3031,112 +2721,6 @@ static int FTL_Read_Block_Table(void)
 	return wResult;
 }

3034
3035/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
3036* Function: FTL_Flash_Error_Handle
3037* Inputs: Pointer to data
3038* Page address
3039* Block address
3040* Outputs: PASS=0 / FAIL=1
3041* Description: It handles any error occurred during Spectra operation
3042*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
3043static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
3044 u64 blk_addr)
3045{
3046 u32 i;
3047 int j;
3048 u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
3049 u64 phy_addr;
3050 int wErase = FAIL;
3051 int wResult = FAIL;
3052 u32 *pbt = (u32 *)g_pBlockTable;
3053
3054 nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
3055 __FILE__, __LINE__, __func__);
3056
3057 if (ERR == GLOB_FTL_Garbage_Collection())
3058 return ERR;
3059
3060 do {
3061 for (i = DeviceInfo.wSpectraEndBlock -
3062 DeviceInfo.wSpectraStartBlock;
3063 i > 0; i--) {
3064 if (IS_SPARE_BLOCK(i)) {
3065 tmp_node = (u32)(BAD_BLOCK |
3066 pbt[blk_node]);
3067 pbt[blk_node] = (u32)(pbt[i] &
3068 (~SPARE_BLOCK));
3069 pbt[i] = tmp_node;
3070#if CMD_DMA
3071 p_BTableChangesDelta =
3072 (struct BTableChangesDelta *)
3073 g_pBTDelta_Free;
3074 g_pBTDelta_Free +=
3075 sizeof(struct BTableChangesDelta);
3076
3077 p_BTableChangesDelta->ftl_cmd_cnt =
3078 ftl_cmd_cnt;
3079 p_BTableChangesDelta->BT_Index =
3080 blk_node;
3081 p_BTableChangesDelta->BT_Entry_Value =
3082 pbt[blk_node];
3083 p_BTableChangesDelta->ValidFields = 0x0C;
3084
3085 p_BTableChangesDelta =
3086 (struct BTableChangesDelta *)
3087 g_pBTDelta_Free;
3088 g_pBTDelta_Free +=
3089 sizeof(struct BTableChangesDelta);
3090
3091 p_BTableChangesDelta->ftl_cmd_cnt =
3092 ftl_cmd_cnt;
3093 p_BTableChangesDelta->BT_Index = i;
3094 p_BTableChangesDelta->BT_Entry_Value = pbt[i];
3095 p_BTableChangesDelta->ValidFields = 0x0C;
3096#endif
3097 wResult = PASS;
3098 break;
3099 }
3100 }
3101
3102 if (FAIL == wResult) {
3103 if (FAIL == GLOB_FTL_Garbage_Collection())
3104 break;
3105 else
3106 continue;
3107 }
3108
3109 if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
3110 g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
3111 FTL_Write_IN_Progress_Block_Table_Page();
3112 }
3113
3114 phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
3115
3116 for (j = 0; j < RETRY_TIMES; j++) {
3117 if (PASS == wErase) {
3118 if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
3119 MARK_BLOCK_AS_BAD(pbt[blk_node]);
3120 break;
3121 }
3122 }
3123 if (PASS == FTL_Cache_Update_Block(pData,
3124 old_page_addr,
3125 phy_addr)) {
3126 wResult = PASS;
3127 break;
3128 } else {
3129 wResult = FAIL;
3130 wErase = PASS;
3131 }
3132 }
3133 } while (FAIL == wResult);
3134
3135 FTL_Write_Block_Table(FAIL);
3136
3137 return wResult;
3138}
3139
 /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function: FTL_Get_Page_Num
 * Inputs: Size in bytes
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 368c30a9d5ff..4af83d5318f2 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -219,6 +219,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
 		return -ENOENT;
 	params.key_len = len;
 	params.key = wlandev->wep_keys[key_index];
+	params.seq_len = 0;

 	callback(cookie, &params);

@@ -735,6 +736,8 @@ struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev)
 	priv->band.n_channels = ARRAY_SIZE(prism2_channels);
 	priv->band.bitrates = priv->rates;
 	priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
+	priv->band.band = IEEE80211_BAND_2GHZ;
+	priv->band.ht_cap.ht_supported = false;
 	wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;

 	set_wiphy_dev(wiphy, dev);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 77d4d715a789..722c840ac638 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -769,6 +769,7 @@ static int __init zram_init(void)
 free_devices:
 	while (dev_id)
 		destroy_device(&devices[--dev_id]);
+	kfree(devices);
 unregister:
 	unregister_blkdev(zram_major, "zram");
 out:
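The one-line fix completes the unwind pattern: everything allocated before the failing step must be released in reverse order, and that includes the device array itself. A self-contained sketch of the same shape, using stub helpers rather than the zram API:

	#include <stdlib.h>

	struct dev_stub { int id; };

	static int create_device(struct dev_stub *d, int id) { d->id = id; return 0; }
	static void destroy_device(struct dev_stub *d) { d->id = -1; }

	static int init_devices(struct dev_stub **out, int num)
	{
		struct dev_stub *devices = calloc(num, sizeof(*devices));
		int dev_id;

		if (!devices)
			return -1;
		for (dev_id = 0; dev_id < num; dev_id++)
			if (create_device(&devices[dev_id], dev_id))
				goto free_devices;
		*out = devices;
		return 0;

	free_devices:
		while (dev_id)
			destroy_device(&devices[--dev_id]);
		free(devices);	/* without this, the array itself leaks on failure */
		return -1;
	}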
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 1c320bfa6fb7..f383cb42b1d7 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1127,6 +1127,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 {
 	struct cxacru_data *instance;
 	struct usb_device *usb_dev = interface_to_usbdev(intf);
+	struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
 	int ret;

 	/* instance init */
@@ -1171,15 +1172,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
 		goto fail;
 	}

-	usb_fill_int_urb(instance->rcv_urb,
-		usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
-		instance->rcv_buf, PAGE_SIZE,
-		cxacru_blocking_completion, &instance->rcv_done, 1);
-
-	usb_fill_int_urb(instance->snd_urb,
-		usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
-		instance->snd_buf, PAGE_SIZE,
-		cxacru_blocking_completion, &instance->snd_done, 4);
+	if (!cmd_ep) {
+		dbg("cxacru_bind: no command endpoint");
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+			== USB_ENDPOINT_XFER_INT) {
+		usb_fill_int_urb(instance->rcv_urb,
+			usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD),
+			instance->rcv_buf, PAGE_SIZE,
+			cxacru_blocking_completion, &instance->rcv_done, 1);
+
+		usb_fill_int_urb(instance->snd_urb,
+			usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD),
+			instance->snd_buf, PAGE_SIZE,
+			cxacru_blocking_completion, &instance->snd_done, 4);
+	} else {
+		usb_fill_bulk_urb(instance->rcv_urb,
+			usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD),
+			instance->rcv_buf, PAGE_SIZE,
+			cxacru_blocking_completion, &instance->rcv_done);
+
+		usb_fill_bulk_urb(instance->snd_urb,
+			usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD),
+			instance->snd_buf, PAGE_SIZE,
+			cxacru_blocking_completion, &instance->snd_done);
+	}

 	mutex_init(&instance->cm_serialize);

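The new code dispatches on the endpoint's transfer type instead of assuming an interrupt endpoint. The two low bits of bmAttributes carry that type; a small sketch using the standard descriptor values:

	#include <stdio.h>

	#define USB_ENDPOINT_XFERTYPE_MASK 0x03
	#define USB_ENDPOINT_XFER_BULK     2
	#define USB_ENDPOINT_XFER_INT      3

	int main(void)
	{
		unsigned char bmAttributes = 0x02;	/* example: a bulk endpoint */

		switch (bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
		case USB_ENDPOINT_XFER_INT:
			printf("interrupt endpoint: use usb_fill_int_urb()\n");
			break;
		case USB_ENDPOINT_XFER_BULK:
			printf("bulk endpoint: use usb_fill_bulk_urb()\n");
			break;
		default:
			printf("control/isochronous\n");
		}
		return 0;
	}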
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1833b3a71515..bc62fae0680f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -965,7 +965,8 @@ static int acm_probe(struct usb_interface *intf,
 	}

 	if (!buflen) {
-		if (intf->cur_altsetting->endpoint->extralen &&
+		if (intf->cur_altsetting->endpoint &&
+		    intf->cur_altsetting->endpoint->extralen &&
 		    intf->cur_altsetting->endpoint->extra) {
 			dev_dbg(&intf->dev,
 				"Seeking extra descriptors on endpoint\n");
@@ -1481,6 +1482,11 @@ static int acm_reset_resume(struct usb_interface *intf)
 		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
 		USB_CDC_ACM_PROTO_VENDOR)

+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+		USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+		USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+		USB_CDC_ACM_PROTO_VENDOR)
+
 /*
  * USB driver structure.
  */
@@ -1591,6 +1597,17 @@ static const struct usb_device_id acm_ids[] = {
 	{ NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
 	{ NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
 	{ NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+	{ NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */

 	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */

@@ -1599,6 +1616,10 @@ static const struct usb_device_id acm_ids[] = {
 	.driver_info = NOT_A_MODEM,
 	},

+	/* control interfaces without any protocol set */
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+		USB_CDC_PROTO_NONE) },
+
 	/* control interfaces with various AT-command sets */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
 		USB_CDC_ACM_PROTO_AT_V25TER) },
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index fd4c36ea5e46..844683e50383 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1724,6 +1724,15 @@ free_interfaces:
 	if (ret)
 		goto free_interfaces;

+	/* if it's already configured, clear out old state first.
+	 * getting rid of old interfaces means unbinding their drivers.
+	 */
+	if (dev->state != USB_STATE_ADDRESS)
+		usb_disable_device(dev, 1);	/* Skip ep0 */
+
+	/* Get rid of pending async Set-Config requests for this device */
+	cancel_async_set_config(dev);
+
 	/* Make sure we have bandwidth (and available HCD resources) for this
 	 * configuration.  Remove endpoints from the schedule if we're dropping
 	 * this configuration to set configuration 0.  After this point, the
@@ -1733,20 +1742,11 @@ free_interfaces:
 	mutex_lock(&hcd->bandwidth_mutex);
 	ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
 	if (ret < 0) {
-		usb_autosuspend_device(dev);
 		mutex_unlock(&hcd->bandwidth_mutex);
+		usb_autosuspend_device(dev);
 		goto free_interfaces;
 	}

-	/* if it's already configured, clear out old state first.
-	 * getting rid of old interfaces means unbinding their drivers.
-	 */
-	if (dev->state != USB_STATE_ADDRESS)
-		usb_disable_device(dev, 1);	/* Skip ep0 */
-
-	/* Get rid of pending async Set-Config requests for this device */
-	cancel_async_set_config(dev);
-
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 			      USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
 			      NULL, 0, USB_CTRL_SET_TIMEOUT);
@@ -1761,8 +1761,8 @@ free_interfaces:
1761 if (!cp) { 1761 if (!cp) {
1762 usb_set_device_state(dev, USB_STATE_ADDRESS); 1762 usb_set_device_state(dev, USB_STATE_ADDRESS);
1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); 1763 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1764 usb_autosuspend_device(dev);
1765 mutex_unlock(&hcd->bandwidth_mutex); 1764 mutex_unlock(&hcd->bandwidth_mutex);
1765 usb_autosuspend_device(dev);
1766 goto free_interfaces; 1766 goto free_interfaces;
1767 } 1767 }
1768 mutex_unlock(&hcd->bandwidth_mutex); 1768 mutex_unlock(&hcd->bandwidth_mutex);
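
Both message.c hunks serve one reordering: usb_disable_device() and cancel_async_set_config() move ahead of the bandwidth allocation, so the old configuration's interfaces are unbound before HCD resources are reshuffled, and the failure paths now release hcd->bandwidth_mutex before calling usb_autosuspend_device() rather than suspending with the mutex held. A condensed sketch of the resulting order; the wrapper and its reduced argument list are illustrative (cancel_async_set_config() is file-local to message.c), while the kernel calls are the ones visible in the hunks:

static int set_config_sketch(struct usb_device *dev, struct usb_hcd *hcd,
                struct usb_host_config *cp, int configuration)
{
        int ret;

        /* 1. Tear down the old configuration first: unbind stale
         *    interface drivers and drop queued Set-Config work. */
        if (dev->state != USB_STATE_ADDRESS)
                usb_disable_device(dev, 1);     /* skip ep0 */
        cancel_async_set_config(dev);

        /* 2. Only then reserve bandwidth for the new configuration. */
        mutex_lock(&hcd->bandwidth_mutex);
        ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
        if (ret < 0) {
                /* unlock before re-enabling autosuspend */
                mutex_unlock(&hcd->bandwidth_mutex);
                usb_autosuspend_device(dev);
                return ret;
        }

        /* 3. Commit on the wire while the reservation is held. */
        ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                        USB_REQ_SET_CONFIGURATION, 0, configuration,
                        0, NULL, 0, USB_CTRL_SET_TIMEOUT);
        mutex_unlock(&hcd->bandwidth_mutex);
        return ret;
}
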
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index e483f80822d2..1160c55de7f2 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -723,12 +723,12 @@ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
723 723
724/** 724/**
725 * usb_string_ids_n() - allocate unused string IDs in batch 725 * usb_string_ids_n() - allocate unused string IDs in batch
726 * @cdev: the device whose string descriptor IDs are being allocated 726 * @c: the device whose string descriptor IDs are being allocated
727 * @n: number of string IDs to allocate 727 * @n: number of string IDs to allocate
728 * Context: single threaded during gadget setup 728 * Context: single threaded during gadget setup
729 * 729 *
730 * Returns the first requested ID. This ID and next @n-1 IDs are now 730 * Returns the first requested ID. This ID and next @n-1 IDs are now
731 * valid IDs. At least providind that @n is non zore because if it 731 * valid IDs. At least provided that @n is non-zero because if it
732 * is, returns last requested ID which is now very useful information. 732 * is, returns last requested ID which is now very useful information.
733 * 733 *
734 * @usb_string_ids_n() is called from bind() callbacks to allocate 734 * @usb_string_ids_n() is called from bind() callbacks to allocate
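
The kernel-doc above states the contract; here is a hedged usage sketch from a composite gadget bind() callback. The string table and function names are illustrative, the API calls are real:

#include <linux/usb/composite.h>

static struct usb_string demo_strings[] = {
        { .s = "Example Manufacturer" },
        { .s = "Example Product" },
        { }                             /* terminator */
};

static int demo_bind(struct usb_composite_dev *cdev)
{
        int id = usb_string_ids_n(cdev, 2);

        if (id < 0)
                return id;              /* descriptor ID space exhausted */
        demo_strings[0].id = id;        /* first ID of the batch */
        demo_strings[1].id = id + 1;    /* the batch is contiguous */
        return 0;
}
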
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 166bf71fd348..e03058fe23cb 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1609,6 +1609,7 @@ static int __init m66592_probe(struct platform_device *pdev)
1609 /* initialize ucd */ 1609 /* initialize ucd */
1610 m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); 1610 m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL);
1611 if (m66592 == NULL) { 1611 if (m66592 == NULL) {
1612 ret = -ENOMEM;
1612 pr_err("kzalloc error\n"); 1613 pr_err("kzalloc error\n");
1613 goto clean_up; 1614 goto clean_up;
1614 } 1615 }
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 70a817842755..2456ccd9965e 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1557,6 +1557,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1557 /* initialize ucd */ 1557 /* initialize ucd */
1558 r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL); 1558 r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
1559 if (r8a66597 == NULL) { 1559 if (r8a66597 == NULL) {
1560 ret = -ENOMEM;
1560 printk(KERN_ERR "kzalloc error\n"); 1561 printk(KERN_ERR "kzalloc error\n");
1561 goto clean_up; 1562 goto clean_up;
1562 } 1563 }
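
The m66592 and r8a66597 fixes are the same one-liner: the shared clean_up label returns ret, and without the assignment a failed kzalloc() jumped there with ret still holding an earlier value (typically 0), so probe could report success while allocating nothing. The idiom, reduced to a sketch with illustrative names:

#include <linux/slab.h>
#include <linux/platform_device.h>

struct demo_priv { int dummy; };        /* stand-in for the UDC state */

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *priv;
        int ret = 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL) {
                ret = -ENOMEM;          /* the assignment both hunks add */
                goto clean_up;
        }
        /* further setup; every failure path must set ret */
        return 0;

clean_up:
        kfree(priv);                    /* kfree(NULL) is a no-op */
        return ret;
}
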
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 020fa5a25fda..972d5ddd1e18 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -293,9 +293,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
293 /* mandatory */ 293 /* mandatory */
294 case OID_GEN_VENDOR_DESCRIPTION: 294 case OID_GEN_VENDOR_DESCRIPTION:
295 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); 295 pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
296 length = strlen (rndis_per_dev_params [configNr].vendorDescr); 296 if ( rndis_per_dev_params [configNr].vendorDescr ) {
297 memcpy (outbuf, 297 length = strlen (rndis_per_dev_params [configNr].vendorDescr);
298 rndis_per_dev_params [configNr].vendorDescr, length); 298 memcpy (outbuf,
299 rndis_per_dev_params [configNr].vendorDescr, length);
300 } else {
301 outbuf[0] = 0;
302 }
299 retval = 0; 303 retval = 0;
300 break; 304 break;
301 305
@@ -1148,7 +1152,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
1148#endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1152#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
1149 1153
1150 1154
1151int __init rndis_init (void) 1155int rndis_init(void)
1152{ 1156{
1153 u8 i; 1157 u8 i;
1154 1158
diff --git a/drivers/usb/gadget/rndis.h b/drivers/usb/gadget/rndis.h
index c236aaa9dcd1..907c33008118 100644
--- a/drivers/usb/gadget/rndis.h
+++ b/drivers/usb/gadget/rndis.h
@@ -262,7 +262,7 @@ int rndis_signal_disconnect (int configNr);
262int rndis_state (int configNr); 262int rndis_state (int configNr);
263extern void rndis_set_host_mac (int configNr, const u8 *addr); 263extern void rndis_set_host_mac (int configNr, const u8 *addr);
264 264
265int __devinit rndis_init (void); 265int rndis_init(void);
266void rndis_exit (void); 266void rndis_exit (void);
267 267
268#endif /* _LINUX_RNDIS_H */ 268#endif /* _LINUX_RNDIS_H */
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 521ebed0118d..a229744a8c7d 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -12,8 +12,6 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13*/ 13*/
14 14
15#define DEBUG
16
17#include <linux/kernel.h> 15#include <linux/kernel.h>
18#include <linux/module.h> 16#include <linux/module.h>
19#include <linux/spinlock.h> 17#include <linux/spinlock.h>
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index 2dcffdac86d2..5e807f083bc8 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -94,7 +94,7 @@ uvc_v4l2_set_format(struct uvc_video *video, struct v4l2_format *fmt)
94 break; 94 break;
95 } 95 }
96 96
97 if (format == NULL || format->fcc != fmt->fmt.pix.pixelformat) { 97 if (i == ARRAY_SIZE(uvc_formats)) {
98 printk(KERN_INFO "Unsupported format 0x%08x.\n", 98 printk(KERN_INFO "Unsupported format 0x%08x.\n",
99 fmt->fmt.pix.pixelformat); 99 fmt->fmt.pix.pixelformat);
100 return -EINVAL; 100 return -EINVAL;
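
The uvc_v4l2.c change replaces a pointer check with the canonical not-found test: after a search loop, comparing the index against ARRAY_SIZE() says unambiguously whether the loop broke on a match or ran off the end. The idiom in isolation, with an illustrative format table:

#include <linux/kernel.h>
#include <linux/errno.h>

static const u32 supported_fcc[] = {
        0x56595559,     /* 'YUYV' fourcc */
        0x47504a4d,     /* 'MJPG' fourcc */
};

static int find_format(u32 fcc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(supported_fcc); i++)
                if (supported_fcc[i] == fcc)
                        break;

        if (i == ARRAY_SIZE(supported_fcc))
                return -EINVAL;         /* loop ran off the end */
        return i;                       /* index of the match */
}
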
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 335ee699fd85..ba52be473027 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct platform_device *op, const struct of_device_id *mat
192 } 192 }
193 193
194 rv = usb_add_hcd(hcd, irq, 0); 194 rv = usb_add_hcd(hcd, irq, 0);
195 if (rv == 0) 195 if (rv)
196 return 0; 196 goto err_ehci;
197
198 return 0;
197 199
200err_ehci:
201 if (ehci->has_amcc_usb23)
202 iounmap(ehci->ohci_hcctrl_reg);
198 iounmap(hcd->regs); 203 iounmap(hcd->regs);
199err_ioremap: 204err_ioremap:
200 irq_dispose_mapping(irq); 205 irq_dispose_mapping(irq);
201err_irq: 206err_irq:
202 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 207 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
203
204 if (ehci->has_amcc_usb23)
205 iounmap(ehci->ohci_hcctrl_reg);
206err_rmr: 208err_rmr:
207 usb_put_hcd(hcd); 209 usb_put_hcd(hcd);
208 210
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index d1a3dfc9a408..bdba8c5d844a 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -829,6 +829,7 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
829 * almost immediately. With ISP1761, this register requires a delay of 829 * almost immediately. With ISP1761, this register requires a delay of
830 * 195ns between a write and subsequent read (see section 15.1.1.3). 830 * 195ns between a write and subsequent read (see section 15.1.1.3).
831 */ 831 */
832 mmiowb();
832 ndelay(195); 833 ndelay(195);
833 skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG); 834 skip_map = isp1760_readl(hcd->regs + HC_ATL_PTD_SKIPMAP_REG);
834 835
@@ -870,6 +871,7 @@ static void enqueue_an_INT_packet(struct usb_hcd *hcd, struct isp1760_qh *qh,
870 * almost immediately. With ISP1761, this register requires a delay of 871 * almost immediately. With ISP1761, this register requires a delay of
871 * 195ns between a write and subsequent read (see section 15.1.1.3). 872 * 195ns between a write and subsequent read (see section 15.1.1.3).
872 */ 873 */
874 mmiowb();
873 ndelay(195); 875 ndelay(195);
874 skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG); 876 skip_map = isp1760_readl(hcd->regs + HC_INT_PTD_SKIPMAP_REG);
875 877
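
On most platforms the skip-map write reaches the chip promptly, but where MMIO writes can be posted, the 195 ns window could start counting before the write actually lands; the added mmiowb() pushes the write toward the device before the delay is measured. A generic sketch with writel()/readl() standing in for the driver's isp1760_* accessors:

#include <linux/io.h>
#include <linux/delay.h>

static u32 write_settle_read(void __iomem *reg, u32 val)
{
        writel(val, reg);
        mmiowb();       /* order the posted write before the delay */
        ndelay(195);    /* ISP1761: 195 ns write-to-read, section 15.1.1.3 */
        return readl(reg);
}
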
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc3f4f427065..48e60d166ff0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci,
131 *seg = (*seg)->next; 131 *seg = (*seg)->next;
132 *trb = ((*seg)->trbs); 132 *trb = ((*seg)->trbs);
133 } else { 133 } else {
134 *trb = (*trb)++; 134 (*trb)++;
135 } 135 }
136} 136}
137 137
@@ -1551,6 +1551,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1551 /* calc actual length */ 1551 /* calc actual length */
1552 if (ep->skip) { 1552 if (ep->skip) {
1553 td->urb->iso_frame_desc[idx].actual_length = 0; 1553 td->urb->iso_frame_desc[idx].actual_length = 0;
1554 /* Update ring dequeue pointer */
1555 while (ep_ring->dequeue != td->last_trb)
1556 inc_deq(xhci, ep_ring, false);
1557 inc_deq(xhci, ep_ring, false);
1554 return finish_td(xhci, td, event_trb, event, ep, status, true); 1558 return finish_td(xhci, td, event_trb, event, ep, status, true);
1555 } 1559 }
1556 1560
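
The first xhci hunk is a one-character undefined-behaviour fix: "*trb = (*trb)++" modifies *trb twice with no intervening sequence point, and in practice the assignment cancels the increment so the pointer never advances; "(*trb)++" is the intended step. The second hunk is separate: a skipped isochronous TD must still have the ring dequeue pointer walked past all of its TRBs before finish_td(). The pointer fix in isolation, with a stand-in TRB type:

#include <linux/types.h>

struct demo_trb { u32 field[4]; };      /* stand-in for union xhci_trb */

static void advance_trb(struct demo_trb **trb)
{
        (*trb)++;       /* was "*trb = (*trb)++": undefined behaviour */
}
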
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index d240de097c62..801324af9470 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -439,7 +439,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
439 /* drain secondary buffer */ 439 /* drain secondary buffer */
440 int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary; 440 int amount = bytes_to_read < data_in_secondary ? bytes_to_read : data_in_secondary;
441 i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount); 441 i = copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount);
442 if (i < 0) { 442 if (i) {
443 retval = -EFAULT; 443 retval = -EFAULT;
444 goto exit; 444 goto exit;
445 } 445 }
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 2de49c8887c5..bc88c79875a1 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -542,7 +542,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
542 retval = io_res; 542 retval = io_res;
543 else { 543 else {
544 io_res = copy_to_user(user_buffer, buffer, dev->report_size); 544 io_res = copy_to_user(user_buffer, buffer, dev->report_size);
545 if (io_res < 0) 545 if (io_res)
546 retval = -EFAULT; 546 retval = -EFAULT;
547 } 547 }
548 break; 548 break;
@@ -574,7 +574,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
574 } 574 }
575 io_res = copy_to_user((struct iowarrior_info __user *)arg, &info, 575 io_res = copy_to_user((struct iowarrior_info __user *)arg, &info,
576 sizeof(struct iowarrior_info)); 576 sizeof(struct iowarrior_info));
577 if (io_res < 0) 577 if (io_res)
578 retval = -EFAULT; 578 retval = -EFAULT;
579 break; 579 break;
580 } 580 }
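
The adutux and iowarrior fixes correct the same misreading of the uaccess API: copy_to_user() returns the number of bytes it failed to copy, as an unsigned long, so it is never negative and the old "< 0" tests made the -EFAULT paths dead code. The contract in one helper:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int give_to_user(void __user *dst, const void *src, size_t len)
{
        /* nonzero means a partial copy, never a negative errno */
        if (copy_to_user(dst, src, len))
                return -EFAULT;
        return 0;
}
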
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 0e8888588d4e..05aaac1c3861 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -550,6 +550,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
550 struct twl4030_usb_data *pdata = pdev->dev.platform_data; 550 struct twl4030_usb_data *pdata = pdev->dev.platform_data;
551 struct twl4030_usb *twl; 551 struct twl4030_usb *twl;
552 int status, err; 552 int status, err;
553 u8 pwr;
553 554
554 if (!pdata) { 555 if (!pdata) {
555 dev_dbg(&pdev->dev, "platform_data not available\n"); 556 dev_dbg(&pdev->dev, "platform_data not available\n");
@@ -568,7 +569,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
568 twl->otg.set_peripheral = twl4030_set_peripheral; 569 twl->otg.set_peripheral = twl4030_set_peripheral;
569 twl->otg.set_suspend = twl4030_set_suspend; 570 twl->otg.set_suspend = twl4030_set_suspend;
570 twl->usb_mode = pdata->usb_mode; 571 twl->usb_mode = pdata->usb_mode;
571 twl->asleep = 1; 572
573 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL);
574
575 twl->asleep = (pwr & PHY_PWR_PHYPWD);
572 576
573 /* init spinlock for workqueue */ 577 /* init spinlock for workqueue */
574 spin_lock_init(&twl->lock); 578 spin_lock_init(&twl->lock);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 2bef4415c19c..4f1744c5871f 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static int debug;
56static const struct usb_device_id id_table[] = { 56static const struct usb_device_id id_table[] = {
57 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ 57 { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
58 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ 58 { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
59 { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
59 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ 60 { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
60 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
61 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ 62 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
@@ -88,6 +89,7 @@ static const struct usb_device_id id_table[] = {
88 { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ 89 { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
89 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ 90 { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
90 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ 91 { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
92 { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
91 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ 93 { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
92 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ 94 { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
93 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ 95 { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -109,6 +111,7 @@ static const struct usb_device_id id_table[] = {
109 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ 111 { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
110 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ 112 { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
111 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ 113 { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
114 { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
112 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 115 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
113 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 116 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
114 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ 117 { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -122,14 +125,14 @@ static const struct usb_device_id id_table[] = {
122 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ 125 { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
123 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ 126 { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
124 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ 127 { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
125 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
126 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
127 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
128 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
129 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ 128 { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
130 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ 129 { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
131 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ 130 { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
132 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ 131 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
132 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
133 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
134 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
135 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
133 { } /* Terminating Entry */ 136 { } /* Terminating Entry */
134}; 137};
135 138
@@ -222,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
222#define BITS_STOP_2 0x0002 225#define BITS_STOP_2 0x0002
223 226
224/* CP210X_SET_BREAK */ 227/* CP210X_SET_BREAK */
225#define BREAK_ON 0x0000 228#define BREAK_ON 0x0001
226#define BREAK_OFF 0x0001 229#define BREAK_OFF 0x0000
227 230
228/* CP210X_(SET_MHS|GET_MDMSTS) */ 231/* CP210X_(SET_MHS|GET_MDMSTS) */
229#define CONTROL_DTR 0x0001 232#define CONTROL_DTR 0x0001
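
Three of the cp210x hunks add IDs, one re-sorts the table (the 0x17F4/0x1843/0x18EF/0x413C block belongs after 0x16DC when sorted by vendor then product), and the last swaps BREAK_ON/BREAK_OFF, which per this fix were inverted relative to what the CP210x SET_BREAK request expects (wValue 1 asserts break). The table convention in miniature, with illustrative entries:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_ids[] = {
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz meter */
        { }     /* terminating entry stops the match loop */
};
MODULE_DEVICE_TABLE(usb, demo_ids);     /* exported for hotplug matching */
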
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eb12d9b096b4..97cc87d654ce 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = {
180 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, 180 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
181 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, 181 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
182 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, 182 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
183 { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
183 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, 184 { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
184 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, 185 { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
185 { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, 186 { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) },
@@ -750,6 +751,16 @@ static struct usb_device_id id_table_combined [] = {
750 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), 751 { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID),
751 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 752 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
752 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, 753 { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
754 { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
755 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
756 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
757 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
758 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
759 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
760 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
761 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
762 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
763 { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
753 { }, /* Optional parameter entry */ 764 { }, /* Optional parameter entry */
754 { } /* Terminating entry */ 765 { } /* Terminating entry */
755}; 766};
@@ -1376,7 +1387,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1376 } 1387 }
1377 1388
1378 /* set max packet size based on descriptor */ 1389 /* set max packet size based on descriptor */
1379 priv->max_packet_size = ep_desc->wMaxPacketSize; 1390 priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize);
1380 1391
1381 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); 1392 dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
1382} 1393}
@@ -1831,7 +1842,7 @@ static int ftdi_process_packet(struct tty_struct *tty,
1831 1842
1832 if (port->port.console && port->sysrq) { 1843 if (port->port.console && port->sysrq) {
1833 for (i = 0; i < len; i++, ch++) { 1844 for (i = 0; i < len; i++, ch++) {
1834 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 1845 if (!usb_serial_handle_sysrq_char(port, *ch))
1835 tty_insert_flip_char(tty, *ch, flag); 1846 tty_insert_flip_char(tty, *ch, flag);
1836 } 1847 }
1837 } else { 1848 } else {
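
The le16_to_cpu() hunk above is an endianness fix: multi-byte USB descriptor fields arrive in bus (little-endian) order and are declared __le16, so they must be converted before use. On x86 the raw read worked by accident; big-endian hosts computed a byte-swapped packet size. The rule in one helper:

#include <linux/usb.h>

static unsigned int ep_maxp(const struct usb_endpoint_descriptor *ep)
{
        return le16_to_cpu(ep->wMaxPacketSize); /* bus order to CPU order */
}
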
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6e612c52e763..15a4583775ad 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -110,6 +110,9 @@
110/* Propox devices */ 110/* Propox devices */
111#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 111#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
112 112
113/* Lenz LI-USB Computer Interface. */
114#define FTDI_LENZ_LIUSB_PID 0xD780
115
113/* 116/*
114 * Xsens Technologies BV products (http://www.xsens.com). 117 * Xsens Technologies BV products (http://www.xsens.com).
115 */ 118 */
@@ -132,6 +135,18 @@
132#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ 135#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
133 136
134/* 137/*
138 * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
139 */
140#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
141#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
142#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
143#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
144#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
145#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
146#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
147#define FTDI_CHAMSYS_WING_PID 0xDAFF
148
149/*
135 * Westrex International devices submitted by Cory Lee 150 * Westrex International devices submitted by Cory Lee
136 */ 151 */
137#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ 152#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
@@ -989,6 +1004,12 @@
989#define ALTI2_N3_PID 0x6001 /* Neptune 3 */ 1004#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
990 1005
991/* 1006/*
1007 * Ionics PlugComputer
1008 */
1009#define IONICS_VID 0x1c0c
1010#define IONICS_PLUGCOMPUTER_PID 0x0102
1011
1012/*
992 * Dresden Elektronik Sensor Terminal Board 1013 * Dresden Elektronik Sensor Terminal Board
993 */ 1014 */
994#define DE_VID 0x1cf1 /* Vendor ID */ 1015#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ca92f67747cc..e6833e216fc9 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -343,7 +343,7 @@ void usb_serial_generic_process_read_urb(struct urb *urb)
343 tty_insert_flip_string(tty, ch, urb->actual_length); 343 tty_insert_flip_string(tty, ch, urb->actual_length);
344 else { 344 else {
345 for (i = 0; i < urb->actual_length; i++, ch++) { 345 for (i = 0; i < urb->actual_length; i++, ch++) {
346 if (!usb_serial_handle_sysrq_char(tty, port, *ch)) 346 if (!usb_serial_handle_sysrq_char(port, *ch))
347 tty_insert_flip_char(tty, *ch, TTY_NORMAL); 347 tty_insert_flip_char(tty, *ch, TTY_NORMAL);
348 } 348 }
349 } 349 }
@@ -448,12 +448,11 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
448EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle); 448EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
449 449
450#ifdef CONFIG_MAGIC_SYSRQ 450#ifdef CONFIG_MAGIC_SYSRQ
451int usb_serial_handle_sysrq_char(struct tty_struct *tty, 451int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
452 struct usb_serial_port *port, unsigned int ch)
453{ 452{
454 if (port->sysrq && port->port.console) { 453 if (port->sysrq && port->port.console) {
455 if (ch && time_before(jiffies, port->sysrq)) { 454 if (ch && time_before(jiffies, port->sysrq)) {
456 handle_sysrq(ch, tty); 455 handle_sysrq(ch);
457 port->sysrq = 0; 456 port->sysrq = 0;
458 return 1; 457 return 1;
459 } 458 }
@@ -462,8 +461,7 @@ int usb_serial_handle_sysrq_char(struct tty_struct *tty,
462 return 0; 461 return 0;
463} 462}
464#else 463#else
465int usb_serial_handle_sysrq_char(struct tty_struct *tty, 464int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch)
466 struct usb_serial_port *port, unsigned int ch)
467{ 465{
468 return 0; 466 return 0;
469} 467}
@@ -518,6 +516,7 @@ void usb_serial_generic_disconnect(struct usb_serial *serial)
518 for (i = 0; i < serial->num_ports; ++i) 516 for (i = 0; i < serial->num_ports; ++i)
519 generic_cleanup(serial->port[i]); 517 generic_cleanup(serial->port[i]);
520} 518}
519EXPORT_SYMBOL_GPL(usb_serial_generic_disconnect);
521 520
522void usb_serial_generic_release(struct usb_serial *serial) 521void usb_serial_generic_release(struct usb_serial *serial)
523{ 522{
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index dc47f986df57..a7cfc5952937 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial)
1151 1151
1152 /* Check if we have an old version in the I2C and 1152 /* Check if we have an old version in the I2C and
1153 update if necessary */ 1153 update if necessary */
1154 if (download_cur_ver != download_new_ver) { 1154 if (download_cur_ver < download_new_ver) {
1155 dbg("%s - Update I2C dld from %d.%d to %d.%d", 1155 dbg("%s - Update I2C dld from %d.%d to %d.%d",
1156 __func__, 1156 __func__,
1157 firmware_version->Ver_Major, 1157 firmware_version->Ver_Major,
@@ -1284,7 +1284,7 @@ static int download_fw(struct edgeport_serial *serial)
1284 kfree(header); 1284 kfree(header);
1285 kfree(rom_desc); 1285 kfree(rom_desc);
1286 kfree(ti_manuf_desc); 1286 kfree(ti_manuf_desc);
1287 return status; 1287 return -EINVAL;
1288 } 1288 }
1289 1289
1290 /* Update I2C with type 0xf2 record with correct 1290 /* Update I2C with type 0xf2 record with correct
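
Two independent download_fw() fixes: the version test becomes "<" so a device already carrying newer I2C firmware is not downgraded to the image bundled with the driver, and the bad-header path returns -EINVAL instead of status, which at that point holds 0 from the last successful read and would have faked success. Reduced to a sketch with illustrative names:

#include <linux/types.h>
#include <linux/errno.h>

static int do_update(void) { return 0; }        /* hypothetical updater */

static int maybe_update_fw(int cur_ver, int new_ver, bool header_ok)
{
        if (!header_ok)
                return -EINVAL; /* a real error, not a stale status of 0 */
        if (cur_ver >= new_ver)
                return 0;       /* up to date or newer: never downgrade */
        return do_update();
}
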
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 585b7e663740..1c9b6e9b2386 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -119,16 +119,20 @@
119 * by making a change here, in moschip_port_id_table, and in 119 * by making a change here, in moschip_port_id_table, and in
120 * moschip_id_table_combined 120 * moschip_id_table_combined
121 */ 121 */
122#define USB_VENDOR_ID_BANDB 0x0856 122#define USB_VENDOR_ID_BANDB 0x0856
123#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 123#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
124#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 124#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
125#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 125#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
126#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 126#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
127#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 127#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
128#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 128#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
129#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 129#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
130#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 130#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
131#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 131#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
132#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
133#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
134#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
135#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
132 136
133/* This driver also supports 137/* This driver also supports
134 * ATEN UC2324 device using Moschip MCS7840 138 * ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = {
184 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 188 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
185 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 189 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
186 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
187 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, 192 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
193 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
188 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, 194 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
189 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, 195 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, 196 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, 197 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
192 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 198 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
199 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
193 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 200 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
201 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
194 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, 202 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
195 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 203 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
196 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 204 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
@@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
201 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 209 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
202 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 210 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
203 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, 211 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
212 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
204 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, 213 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
214 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
205 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, 215 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
206 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, 216 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
207 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, 217 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
208 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, 218 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
209 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 219 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
220 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
210 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 221 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
222 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
211 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, 223 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
212 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 224 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
213 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 225 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index a6b207c84917..1f00f243c26c 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -25,6 +25,7 @@ static int debug;
25 25
26static const struct usb_device_id id_table[] = { 26static const struct usb_device_id id_table[] = {
27 { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ 27 { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
28 { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */
28 { }, 29 { },
29}; 30};
30MODULE_DEVICE_TABLE(usb, id_table); 31MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 9fc6ea2c681f..c46911af282f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -164,6 +164,14 @@ static void option_instat_callback(struct urb *urb);
164#define YISO_VENDOR_ID 0x0EAB 164#define YISO_VENDOR_ID 0x0EAB
165#define YISO_PRODUCT_U893 0xC893 165#define YISO_PRODUCT_U893 0xC893
166 166
167/*
168 * NOVATEL WIRELESS PRODUCTS
169 *
170 * Note from Novatel Wireless:
171 * If your Novatel modem does not work on linux, don't
172 * change the option module, but check our website. If
173 * that does not help, contact ddeschepper@nvtl.com
174*/
167/* MERLIN EVDO PRODUCTS */ 175/* MERLIN EVDO PRODUCTS */
168#define NOVATELWIRELESS_PRODUCT_V640 0x1100 176#define NOVATELWIRELESS_PRODUCT_V640 0x1100
169#define NOVATELWIRELESS_PRODUCT_V620 0x1110 177#define NOVATELWIRELESS_PRODUCT_V620 0x1110
@@ -185,24 +193,39 @@ static void option_instat_callback(struct urb *urb);
185#define NOVATELWIRELESS_PRODUCT_EU730 0x2400 193#define NOVATELWIRELESS_PRODUCT_EU730 0x2400
186#define NOVATELWIRELESS_PRODUCT_EU740 0x2410 194#define NOVATELWIRELESS_PRODUCT_EU740 0x2410
187#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 195#define NOVATELWIRELESS_PRODUCT_EU870D 0x2420
188
189/* OVATION PRODUCTS */ 196/* OVATION PRODUCTS */
190#define NOVATELWIRELESS_PRODUCT_MC727 0x4100 197#define NOVATELWIRELESS_PRODUCT_MC727 0x4100
191#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 198#define NOVATELWIRELESS_PRODUCT_MC950D 0x4400
192#define NOVATELWIRELESS_PRODUCT_U727 0x5010 199/*
193#define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100 200 * Note from Novatel Wireless:
194#define NOVATELWIRELESS_PRODUCT_MC760 0x6000 201 * All PID in the 5xxx range are currently reserved for
202 * auto-install CDROMs, and should not be added to this
203 * module.
204 *
205 * #define NOVATELWIRELESS_PRODUCT_U727 0x5010
206 * #define NOVATELWIRELESS_PRODUCT_MC727_NEW 0x5100
207*/
195#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002 208#define NOVATELWIRELESS_PRODUCT_OVMC760 0x6002
196 209#define NOVATELWIRELESS_PRODUCT_MC780 0x6010
197/* FUTURE NOVATEL PRODUCTS */ 210#define NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED 0x6000
198#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0X6001 211#define NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED 0x6001
199#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0X7000 212#define NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED 0x7000
200#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0X7001 213#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED 0x7001
201#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0X8000 214#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3 0x7003
202#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0X8001 215#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4 0x7004
203#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0X9000 216#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5 0x7005
204#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0X9001 217#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6 0x7006
205#define NOVATELWIRELESS_PRODUCT_GLOBAL 0XA001 218#define NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7 0x7007
219#define NOVATELWIRELESS_PRODUCT_MC996D 0x7030
220#define NOVATELWIRELESS_PRODUCT_MF3470 0x7041
221#define NOVATELWIRELESS_PRODUCT_MC547 0x7042
222#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED 0x8000
223#define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001
224#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000
225#define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001
226#define NOVATELWIRELESS_PRODUCT_G1 0xA001
227#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
228#define NOVATELWIRELESS_PRODUCT_G2 0xA010
206 229
207/* AMOI PRODUCTS */ 230/* AMOI PRODUCTS */
208#define AMOI_VENDOR_ID 0x1614 231#define AMOI_VENDOR_ID 0x1614
@@ -365,6 +388,10 @@ static void option_instat_callback(struct urb *urb);
365#define OLIVETTI_VENDOR_ID 0x0b3c 388#define OLIVETTI_VENDOR_ID 0x0b3c
366#define OLIVETTI_PRODUCT_OLICARD100 0xc000 389#define OLIVETTI_PRODUCT_OLICARD100 0xc000
367 390
391/* Celot products */
392#define CELOT_VENDOR_ID 0x211f
393#define CELOT_PRODUCT_CT680M 0x6801
394
368/* some devices interfaces need special handling due to a number of reasons */ 395/* some devices interfaces need special handling due to a number of reasons */
369enum option_blacklist_reason { 396enum option_blacklist_reason {
370 OPTION_BLACKLIST_NONE = 0, 397 OPTION_BLACKLIST_NONE = 0,
@@ -486,36 +513,44 @@ static const struct usb_device_id option_ids[] = {
486 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 513 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
487 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 514 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
488 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 515 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
489 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ 516 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
490 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ 517 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
491 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ 518 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
492 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ 519 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) },
493 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ 520 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) },
494 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ 521 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) },
495 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) },
496 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) },
497 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) },
498 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ 525 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) },
499 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ 526 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) },
500 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ 527 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) },
501 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) }, /* Novatel Merlin ES620 SM Bus */ 528 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES620) },
502 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ 529 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) },
503 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ 530 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) },
504 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ 531 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) },
505 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ 532 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) },
506 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ 533 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) },
507 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727_NEW) }, /* Novatel MC727/U727/USB727 refresh */ 534 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) },
508 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U727) }, /* Novatel MC727/U727/USB727 */ 535 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC780) },
509 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC760) }, /* Novatel MC760/U760/USB760 */ 536 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_FULLSPEED) },
510 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_OVMC760) }, /* Novatel Ovation MC760 */ 537 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) },
511 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_FULLSPEED) }, /* Novatel HSPA product */ 538 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
512 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) }, /* Novatel EVDO Embedded product */ 539 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
513 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) }, /* Novatel HSPA Embedded product */ 540 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
514 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) }, /* Novatel EVDO product */ 541 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
515 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) }, /* Novatel HSPA product */ 542 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
516 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, /* Novatel EVDO Embedded product */ 543 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
517 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, /* Novatel HSPA Embedded product */ 544 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED6) },
518 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL) }, /* Novatel Global product */ 545 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED7) },
546 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC996D) },
547 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MF3470) },
548 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) },
549 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) },
550 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) },
551 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
552 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
553 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
519 554
520 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, 555 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
521 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, 556 { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -887,10 +922,9 @@ static const struct usb_device_id option_ids[] = {
887 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, 922 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
888 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, 923 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
889 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, 924 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
890
891 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, 925 { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
892
893 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 926 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
927 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
894 { } /* Terminating entry */ 928 { } /* Terminating entry */
895}; 929};
896MODULE_DEVICE_TABLE(usb, option_ids); 930MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 6b6001822279..8ae4c6cbc38a 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = {
86 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, 86 { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
87 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, 87 { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
88 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, 88 { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
89 { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
89 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, 90 { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
90 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, 91 { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) },
91 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, 92 { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) },
@@ -788,7 +789,7 @@ static void pl2303_process_read_urb(struct urb *urb)
788 789
789 if (port->port.console && port->sysrq) { 790 if (port->port.console && port->sysrq) {
790 for (i = 0; i < urb->actual_length; ++i) 791 for (i = 0; i < urb->actual_length; ++i)
791 if (!usb_serial_handle_sysrq_char(tty, port, data[i])) 792 if (!usb_serial_handle_sysrq_char(port, data[i]))
792 tty_insert_flip_char(tty, data[i], tty_flag); 793 tty_insert_flip_char(tty, data[i], tty_flag);
793 } else { 794 } else {
794 tty_insert_flip_string_fixed_flag(tty, data, tty_flag, 795 tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a871645389dd..43eb9bdad422 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -128,6 +128,10 @@
128#define CRESSI_VENDOR_ID 0x04b8 128#define CRESSI_VENDOR_ID 0x04b8
129#define CRESSI_EDY_PRODUCT_ID 0x0521 129#define CRESSI_EDY_PRODUCT_ID 0x0521
130 130
131/* Zeagle dive computer interface */
132#define ZEAGLE_VENDOR_ID 0x04b8
133#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522
134
131/* Sony, USB data cable for CMD-Jxx mobile phones */ 135/* Sony, USB data cable for CMD-Jxx mobile phones */
132#define SONY_VENDOR_ID 0x054c 136#define SONY_VENDOR_ID 0x054c
133#define SONY_QN3USB_PRODUCT_ID 0x0437 137#define SONY_QN3USB_PRODUCT_ID 0x0437
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 6e82d4f54bc8..e986002b3844 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -15,6 +15,7 @@
15#include <linux/serial.h> 15#include <linux/serial.h>
16#include <linux/usb.h> 16#include <linux/usb.h>
17#include <linux/usb/serial.h> 17#include <linux/usb/serial.h>
18#include <linux/serial_reg.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
19 20
20#define QT_OPEN_CLOSE_CHANNEL 0xca 21#define QT_OPEN_CLOSE_CHANNEL 0xca
@@ -27,36 +28,11 @@
27#define QT_HW_FLOW_CONTROL_MASK 0xc5 28#define QT_HW_FLOW_CONTROL_MASK 0xc5
28#define QT_SW_FLOW_CONTROL_MASK 0xc6 29#define QT_SW_FLOW_CONTROL_MASK 0xc6
29 30
30#define MODEM_CTL_REGISTER 0x04
31#define MODEM_STATUS_REGISTER 0x06
32
33
34#define SERIAL_LSR_OE 0x02
35#define SERIAL_LSR_PE 0x04
36#define SERIAL_LSR_FE 0x08
37#define SERIAL_LSR_BI 0x10
38
39#define SERIAL_LSR_TEMT 0x40
40
41#define SERIAL_MCR_DTR 0x01
42#define SERIAL_MCR_RTS 0x02
43#define SERIAL_MCR_LOOP 0x10
44
45#define SERIAL_MSR_CTS 0x10
46#define SERIAL_MSR_CD 0x80
47#define SERIAL_MSR_RI 0x40
48#define SERIAL_MSR_DSR 0x20
49#define SERIAL_MSR_MASK 0xf0 31#define SERIAL_MSR_MASK 0xf0
50 32
51#define SERIAL_CRTSCTS ((SERIAL_MCR_RTS << 8) | SERIAL_MSR_CTS) 33#define SERIAL_CRTSCTS ((UART_MCR_RTS << 8) | UART_MSR_CTS)
52 34
53#define SERIAL_8_DATA 0x03 35#define SERIAL_EVEN_PARITY (UART_LCR_PARITY | UART_LCR_EPAR)
54#define SERIAL_7_DATA 0x02
55#define SERIAL_6_DATA 0x01
56#define SERIAL_5_DATA 0x00
57
58#define SERIAL_ODD_PARITY 0X08
59#define SERIAL_EVEN_PARITY 0X18
60 36
61#define MAX_BAUD_RATE 460800 37#define MAX_BAUD_RATE 460800
62 38
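
The deleted block re-declared standard 16550 register bits that <linux/serial_reg.h> already provides (UART_LCR_WLEN5..8, UART_LCR_PARITY, UART_LCR_EPAR, UART_MCR_*, UART_MSR_*), so the driver now shares the common definitions. How an LCR value is assembled from them, mirroring the set_termios hunk further down; the helper itself is illustrative:

#include <linux/types.h>
#include <linux/serial_reg.h>

static u8 build_lcr(unsigned int data_bits, bool parity, bool even)
{
        u8 lcr;

        switch (data_bits) {
        case 5: lcr = UART_LCR_WLEN5; break;
        case 6: lcr = UART_LCR_WLEN6; break;
        case 7: lcr = UART_LCR_WLEN7; break;
        default: lcr = UART_LCR_WLEN8; break;
        }
        if (parity) {
                lcr |= UART_LCR_PARITY;         /* odd parity */
                if (even)
                        lcr |= UART_LCR_EPAR;   /* upgrade to even */
        }
        return lcr;
}
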
@@ -70,7 +46,7 @@
70#define FULLPWRBIT 0x00000080 46#define FULLPWRBIT 0x00000080
71#define NEXT_BOARD_POWER_BIT 0x00000004 47#define NEXT_BOARD_POWER_BIT 0x00000004
72 48
73static int debug = 1; 49static int debug;
74 50
75/* Version Information */ 51/* Version Information */
76#define DRIVER_VERSION "v0.1" 52#define DRIVER_VERSION "v0.1"
@@ -99,10 +75,12 @@ static struct usb_driver ssu100_driver = {
99}; 75};
100 76
101struct ssu100_port_private { 77struct ssu100_port_private {
78 spinlock_t status_lock;
102 u8 shadowLSR; 79 u8 shadowLSR;
103 u8 shadowMSR; 80 u8 shadowMSR;
104 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ 81 wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
105 unsigned short max_packet_size; 82 unsigned short max_packet_size;
83 struct async_icount icount;
106}; 84};
107 85
108static void ssu100_release(struct usb_serial *serial) 86static void ssu100_release(struct usb_serial *serial)
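
The new status_lock exists because shadowLSR, shadowMSR and the icount counters are written from URB completion (interrupt context) and read from process context (open, TIOCMIWAIT, TIOCGICOUNT), so both sides must take the lock with irqsave. A hedged sketch of the producer side; the helper name and the exact MSR-delta decoding are illustrative, while the fields and UART_MSR_* bits match the patch:

static void demo_msr_update(struct ssu100_port_private *priv, u8 msr)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->status_lock, flags);
        priv->shadowMSR = msr;
        if (msr & UART_MSR_DCTS)
                priv->icount.cts++;     /* counted under the same lock */
        if (msr & UART_MSR_DDSR)
                priv->icount.dsr++;
        spin_unlock_irqrestore(&priv->status_lock, flags);
        /* wake any TIOCMIWAIT sleeper so it can re-check the counters */
        wake_up_interruptible(&priv->delta_msr_wait);
}
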
@@ -150,9 +128,10 @@ static inline int ssu100_getregister(struct usb_device *dev,
150 128
151static inline int ssu100_setregister(struct usb_device *dev, 129static inline int ssu100_setregister(struct usb_device *dev,
152 unsigned short uart, 130 unsigned short uart,
131 unsigned short reg,
153 u16 data) 132 u16 data)
154{ 133{
155 u16 value = (data << 8) | MODEM_CTL_REGISTER; 134 u16 value = (data << 8) | reg;
156 135
157 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 136 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
158 QT_SET_GET_REGISTER, 0x40, value, uart, 137 QT_SET_GET_REGISTER, 0x40, value, uart,
@@ -178,11 +157,11 @@ static inline int update_mctrl(struct usb_device *dev, unsigned int set,
178 clear &= ~set; /* 'set' takes precedence over 'clear' */ 157 clear &= ~set; /* 'set' takes precedence over 'clear' */
179 urb_value = 0; 158 urb_value = 0;
180 if (set & TIOCM_DTR) 159 if (set & TIOCM_DTR)
181 urb_value |= SERIAL_MCR_DTR; 160 urb_value |= UART_MCR_DTR;
182 if (set & TIOCM_RTS) 161 if (set & TIOCM_RTS)
183 urb_value |= SERIAL_MCR_RTS; 162 urb_value |= UART_MCR_RTS;
184 163
185 result = ssu100_setregister(dev, 0, urb_value); 164 result = ssu100_setregister(dev, 0, UART_MCR, urb_value);
186 if (result < 0) 165 if (result < 0)
187 dbg("%s Error from MODEM_CTRL urb", __func__); 166 dbg("%s Error from MODEM_CTRL urb", __func__);
188 167
@@ -264,24 +243,24 @@ static void ssu100_set_termios(struct tty_struct *tty,
264 243
265 if (cflag & PARENB) { 244 if (cflag & PARENB) {
266 if (cflag & PARODD) 245 if (cflag & PARODD)
267 urb_value |= SERIAL_ODD_PARITY; 246 urb_value |= UART_LCR_PARITY;
268 else 247 else
269 urb_value |= SERIAL_EVEN_PARITY; 248 urb_value |= SERIAL_EVEN_PARITY;
270 } 249 }
271 250
272 switch (cflag & CSIZE) { 251 switch (cflag & CSIZE) {
273 case CS5: 252 case CS5:
274 urb_value |= SERIAL_5_DATA; 253 urb_value |= UART_LCR_WLEN5;
275 break; 254 break;
276 case CS6: 255 case CS6:
277 urb_value |= SERIAL_6_DATA; 256 urb_value |= UART_LCR_WLEN6;
278 break; 257 break;
279 case CS7: 258 case CS7:
280 urb_value |= SERIAL_7_DATA; 259 urb_value |= UART_LCR_WLEN7;
281 break; 260 break;
282 default: 261 default:
283 case CS8: 262 case CS8:
284 urb_value |= SERIAL_8_DATA; 263 urb_value |= UART_LCR_WLEN8;
285 break; 264 break;
286 } 265 }
287 266
@@ -333,6 +312,7 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
333 struct ssu100_port_private *priv = usb_get_serial_port_data(port); 312 struct ssu100_port_private *priv = usb_get_serial_port_data(port);
334 u8 *data; 313 u8 *data;
335 int result; 314 int result;
315 unsigned long flags;
336 316
337 dbg("%s - port %d", __func__, port->number); 317 dbg("%s - port %d", __func__, port->number);
338 318
@@ -350,11 +330,10 @@ static int ssu100_open(struct tty_struct *tty, struct usb_serial_port *port)
350 return result; 330 return result;
351 } 331 }
352 332
353 priv->shadowLSR = data[0] & (SERIAL_LSR_OE | SERIAL_LSR_PE | 333 spin_lock_irqsave(&priv->status_lock, flags);
354 SERIAL_LSR_FE | SERIAL_LSR_BI); 334 priv->shadowLSR = data[0];
355 335 priv->shadowMSR = data[1];
356 priv->shadowMSR = data[1] & (SERIAL_MSR_CTS | SERIAL_MSR_DSR | 336 spin_unlock_irqrestore(&priv->status_lock, flags);
357 SERIAL_MSR_RI | SERIAL_MSR_CD);
358 337
359 kfree(data); 338 kfree(data);
360 339
@@ -398,11 +377,51 @@ static int get_serial_info(struct usb_serial_port *port,
 	return 0;
 }
 
+static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
+{
+	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+	struct async_icount prev, cur;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	prev = priv->icount;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	while (1) {
+		wait_event_interruptible(priv->delta_msr_wait,
+					 ((priv->icount.rng != prev.rng) ||
+					  (priv->icount.dsr != prev.dsr) ||
+					  (priv->icount.dcd != prev.dcd) ||
+					  (priv->icount.cts != prev.cts)));
+
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+
+		spin_lock_irqsave(&priv->status_lock, flags);
+		cur = priv->icount;
+		spin_unlock_irqrestore(&priv->status_lock, flags);
+
+		if ((prev.rng == cur.rng) &&
+		    (prev.dsr == cur.dsr) &&
+		    (prev.dcd == cur.dcd) &&
+		    (prev.cts == cur.cts))
+			return -EIO;
+
+		if ((arg & TIOCM_RNG && (prev.rng != cur.rng)) ||
+		    (arg & TIOCM_DSR && (prev.dsr != cur.dsr)) ||
+		    (arg & TIOCM_CD && (prev.dcd != cur.dcd)) ||
+		    (arg & TIOCM_CTS && (prev.cts != cur.cts)))
+			return 0;
+	}
+	return 0;
+}
+
 static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
 			unsigned int cmd, unsigned long arg)
 {
 	struct usb_serial_port *port = tty->driver_data;
 	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+	void __user *user_arg = (void __user *)arg;
 
 	dbg("%s cmd 0x%04x", __func__, cmd);
 
@@ -412,28 +431,28 @@ static int ssu100_ioctl(struct tty_struct *tty, struct file *file,
 				       (struct serial_struct __user *) arg);
 
 	case TIOCMIWAIT:
-		while (priv != NULL) {
-			u8 prevMSR = priv->shadowMSR & SERIAL_MSR_MASK;
-			interruptible_sleep_on(&priv->delta_msr_wait);
-			/* see if a signal did it */
-			if (signal_pending(current))
-				return -ERESTARTSYS;
-			else {
-				u8 diff = (priv->shadowMSR & SERIAL_MSR_MASK) ^ prevMSR;
-				if (!diff)
-					return -EIO; /* no change => error */
-
-				/* Return 0 if caller wanted to know about
-				   these bits */
-
-				if (((arg & TIOCM_RNG) && (diff & SERIAL_MSR_RI)) ||
-				    ((arg & TIOCM_DSR) && (diff & SERIAL_MSR_DSR)) ||
-				    ((arg & TIOCM_CD) && (diff & SERIAL_MSR_CD)) ||
-				    ((arg & TIOCM_CTS) && (diff & SERIAL_MSR_CTS)))
-					return 0;
-			}
-		}
+		return wait_modem_info(port, arg);
+
+	case TIOCGICOUNT:
+	{
+		struct serial_icounter_struct icount;
+		struct async_icount cnow = priv->icount;
+		memset(&icount, 0, sizeof(icount));
+		icount.cts = cnow.cts;
+		icount.dsr = cnow.dsr;
+		icount.rng = cnow.rng;
+		icount.dcd = cnow.dcd;
+		icount.rx = cnow.rx;
+		icount.tx = cnow.tx;
+		icount.frame = cnow.frame;
+		icount.overrun = cnow.overrun;
+		icount.parity = cnow.parity;
+		icount.brk = cnow.brk;
+		icount.buf_overrun = cnow.buf_overrun;
+		if (copy_to_user(user_arg, &icount, sizeof(icount)))
+			return -EFAULT;
 		return 0;
+	}
 
 	default:
 		break;
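
The two ioctls reworked above are easiest to sanity-check from user space. A minimal sketch, assuming a hypothetical /dev/ttyUSB0 node bound to this driver: TIOCMIWAIT blocks until one of the requested modem lines changes, and TIOCGICOUNT then reads back the per-line event counters that wait_modem_info() relies on.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/serial.h>	/* struct serial_icounter_struct */

	int main(void)
	{
		struct serial_icounter_struct ic;
		int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);	/* hypothetical node */

		if (fd < 0)
			return 1;
		/* Blocks in the driver's wait_modem_info() until CTS or DCD toggles. */
		if (ioctl(fd, TIOCMIWAIT, TIOCM_CTS | TIOCM_CD) == 0 &&
		    ioctl(fd, TIOCGICOUNT, &ic) == 0)
			printf("cts=%d dcd=%d\n", ic.cts, ic.dcd);
		close(fd);
		return 0;
	}
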
@@ -455,6 +474,7 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
 
 	unsigned num_endpoints;
 	int i;
+	unsigned long flags;
 
 	num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
 	dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -466,7 +486,9 @@ static void ssu100_set_max_packet_size(struct usb_serial_port *port)
 	}
 
 	/* set max packet size based on descriptor */
+	spin_lock_irqsave(&priv->status_lock, flags);
 	priv->max_packet_size = ep_desc->wMaxPacketSize;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
 
 	dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size);
 }
@@ -485,9 +507,9 @@ static int ssu100_attach(struct usb_serial *serial)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&priv->status_lock);
 	init_waitqueue_head(&priv->delta_msr_wait);
 	usb_set_serial_port_data(port, priv);
-
 	ssu100_set_max_packet_size(port);
 
 	return ssu100_initdevice(serial->dev);
@@ -506,20 +528,20 @@ static int ssu100_tiocmget(struct tty_struct *tty, struct file *file)
 	if (!d)
 		return -ENOMEM;
 
-	r = ssu100_getregister(dev, 0, MODEM_CTL_REGISTER, d);
+	r = ssu100_getregister(dev, 0, UART_MCR, d);
 	if (r < 0)
 		goto mget_out;
 
-	r = ssu100_getregister(dev, 0, MODEM_STATUS_REGISTER, d+1);
+	r = ssu100_getregister(dev, 0, UART_MSR, d+1);
 	if (r < 0)
 		goto mget_out;
 
-	r = (d[0] & SERIAL_MCR_DTR ? TIOCM_DTR : 0) |
-	    (d[0] & SERIAL_MCR_RTS ? TIOCM_RTS : 0) |
-	    (d[1] & SERIAL_MSR_CTS ? TIOCM_CTS : 0) |
-	    (d[1] & SERIAL_MSR_CD ? TIOCM_CAR : 0) |
-	    (d[1] & SERIAL_MSR_RI ? TIOCM_RI : 0) |
-	    (d[1] & SERIAL_MSR_DSR ? TIOCM_DSR : 0);
+	r = (d[0] & UART_MCR_DTR ? TIOCM_DTR : 0) |
+	    (d[0] & UART_MCR_RTS ? TIOCM_RTS : 0) |
+	    (d[1] & UART_MSR_CTS ? TIOCM_CTS : 0) |
+	    (d[1] & UART_MSR_DCD ? TIOCM_CAR : 0) |
+	    (d[1] & UART_MSR_RI ? TIOCM_RI : 0) |
+	    (d[1] & UART_MSR_DSR ? TIOCM_DSR : 0);
 
 mget_out:
 	kfree(d);
@@ -546,7 +568,7 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
 	if (!port->serial->disconnected) {
 		/* Disable flow control */
 		if (!on &&
-		    ssu100_setregister(dev, 0, 0) < 0)
+		    ssu100_setregister(dev, 0, UART_MCR, 0) < 0)
 			dev_err(&port->dev, "error from flowcontrol urb\n");
 		/* drop RTS and DTR */
 		if (on)
@@ -557,34 +579,88 @@ static void ssu100_dtr_rts(struct usb_serial_port *port, int on)
 	mutex_unlock(&port->serial->disc_mutex);
 }
 
+static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
+{
+	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	priv->shadowMSR = msr;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	if (msr & UART_MSR_ANY_DELTA) {
+		/* update input line counters */
+		if (msr & UART_MSR_DCTS)
+			priv->icount.cts++;
+		if (msr & UART_MSR_DDSR)
+			priv->icount.dsr++;
+		if (msr & UART_MSR_DDCD)
+			priv->icount.dcd++;
+		if (msr & UART_MSR_TERI)
+			priv->icount.rng++;
+		wake_up_interruptible(&priv->delta_msr_wait);
+	}
+}
+
+static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
+			      char *tty_flag)
+{
+	struct ssu100_port_private *priv = usb_get_serial_port_data(port);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->status_lock, flags);
+	priv->shadowLSR = lsr;
+	spin_unlock_irqrestore(&priv->status_lock, flags);
+
+	*tty_flag = TTY_NORMAL;
+	if (lsr & UART_LSR_BRK_ERROR_BITS) {
+		/* we always want to update icount, but we only want to
+		 * update tty_flag for one case */
+		if (lsr & UART_LSR_BI) {
+			priv->icount.brk++;
+			*tty_flag = TTY_BREAK;
+			usb_serial_handle_break(port);
+		}
+		if (lsr & UART_LSR_PE) {
+			priv->icount.parity++;
+			if (*tty_flag == TTY_NORMAL)
+				*tty_flag = TTY_PARITY;
+		}
+		if (lsr & UART_LSR_FE) {
+			priv->icount.frame++;
+			if (*tty_flag == TTY_NORMAL)
+				*tty_flag = TTY_FRAME;
+		}
+		if (lsr & UART_LSR_OE) {
+			priv->icount.overrun++;
+			if (*tty_flag == TTY_NORMAL)
+				*tty_flag = TTY_OVERRUN;
+		}
+	}
+
+}
+
 static int ssu100_process_packet(struct tty_struct *tty,
 				 struct usb_serial_port *port,
 				 struct ssu100_port_private *priv,
 				 char *packet, int len)
 {
 	int i;
-	char flag;
+	char flag = TTY_NORMAL;
 	char *ch;
 
 	dbg("%s - port %d", __func__, port->number);
 
-	if (len < 4) {
-		dbg("%s - malformed packet", __func__);
-		return 0;
-	}
-
-	if ((packet[0] == 0x1b) && (packet[1] == 0x1b) &&
+	if ((len >= 4) &&
+	    (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
 	    ((packet[2] == 0x00) || (packet[2] == 0x01))) {
-		if (packet[2] == 0x00)
-			priv->shadowLSR = packet[3] & (SERIAL_LSR_OE |
-						       SERIAL_LSR_PE |
-						       SERIAL_LSR_FE |
-						       SERIAL_LSR_BI);
-
-		if (packet[2] == 0x01) {
-			priv->shadowMSR = packet[3];
-			wake_up_interruptible(&priv->delta_msr_wait);
+		if (packet[2] == 0x00) {
+			ssu100_update_lsr(port, packet[3], &flag);
+			if (flag == TTY_OVERRUN)
+				tty_insert_flip_char(tty, 0, TTY_OVERRUN);
 		}
+		if (packet[2] == 0x01)
+			ssu100_update_msr(port, packet[3]);
 
 		len -= 4;
 		ch = packet + 4;
@@ -596,7 +672,7 @@ static int ssu100_process_packet(struct tty_struct *tty,
 
 	if (port->port.console && port->sysrq) {
 		for (i = 0; i < len; i++, ch++) {
-			if (!usb_serial_handle_sysrq_char(tty, port, *ch))
+			if (!usb_serial_handle_sysrq_char(port, *ch))
 				tty_insert_flip_char(tty, *ch, flag);
 		}
 	} else
@@ -631,7 +707,6 @@ static void ssu100_process_read_urb(struct urb *urb)
 	tty_kref_put(tty);
 }
 
-
 static struct usb_serial_driver ssu100_device = {
 	.driver = {
 		.owner = THIS_MODULE,
@@ -653,6 +728,7 @@ static struct usb_serial_driver ssu100_device = {
 	.tiocmset = ssu100_tiocmset,
 	.ioctl = ssu100_ioctl,
 	.set_termios = ssu100_set_termios,
+	.disconnect = usb_serial_generic_disconnect,
 };
 
 static int __init ssu100_init(void)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 2a982e62963b..7a2177c79bde 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -736,6 +736,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
 	serial = create_serial(dev, interface, type);
 	if (!serial) {
+		module_put(type->driver.owner);
 		dev_err(&interface->dev, "%s - out of memory\n", __func__);
 		return -ENOMEM;
 	}
@@ -746,11 +747,11 @@ int usb_serial_probe(struct usb_interface *interface,
 
 		id = get_iface_id(type, interface);
 		retval = type->probe(serial, id);
-		module_put(type->driver.owner);
 
 		if (retval) {
 			dbg("sub driver rejected device");
 			kfree(serial);
+			module_put(type->driver.owner);
 			return retval;
 		}
 	}
@@ -822,6 +823,7 @@ int usb_serial_probe(struct usb_interface *interface,
 		if (num_bulk_in == 0 || num_bulk_out == 0) {
 			dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
 			kfree(serial);
+			module_put(type->driver.owner);
 			return -ENODEV;
 		}
 	}
@@ -835,22 +837,15 @@ int usb_serial_probe(struct usb_interface *interface,
 			dev_err(&interface->dev,
 				"Generic device with no bulk out, not allowed.\n");
 			kfree(serial);
+			module_put(type->driver.owner);
 			return -EIO;
 		}
 	}
 #endif
 	if (!num_ports) {
 		/* if this device type has a calc_num_ports function, call it */
-		if (type->calc_num_ports) {
-			if (!try_module_get(type->driver.owner)) {
-				dev_err(&interface->dev,
-					"module get failed, exiting\n");
-				kfree(serial);
-				return -EIO;
-			}
+		if (type->calc_num_ports)
 			num_ports = type->calc_num_ports(serial);
-			module_put(type->driver.owner);
-		}
 		if (!num_ports)
 			num_ports = type->num_ports;
 	}
@@ -1039,13 +1034,7 @@ int usb_serial_probe(struct usb_interface *interface,
 
 	/* if this device type has an attach function, call it */
 	if (type->attach) {
-		if (!try_module_get(type->driver.owner)) {
-			dev_err(&interface->dev,
-				"module get failed, exiting\n");
-			goto probe_error;
-		}
 		retval = type->attach(serial);
-		module_put(type->driver.owner);
 		if (retval < 0)
 			goto probe_error;
 		serial->attached = 1;
@@ -1088,10 +1077,12 @@ int usb_serial_probe(struct usb_interface *interface,
 exit:
 	/* success */
 	usb_set_intfdata(interface, serial);
+	module_put(type->driver.owner);
 	return 0;
 
 probe_error:
 	usb_serial_put(serial);
+	module_put(type->driver.owner);
 	return -EIO;
 }
 EXPORT_SYMBOL_GPL(usb_serial_probe);
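
Taken together, the usb-serial hunks above replace two short-lived try_module_get()/module_put() pairs with a single reference held across the whole of probe and dropped on every exit path. Reduced to a hedged sketch of that pattern (sketch_probe and sketch_setup are illustrative names, not the driver's code):

	#include <linux/module.h>

	static int sketch_setup(void)		/* hypothetical device setup */
	{
		return 0;
	}

	/* One reference pinned up front; every return path releases it. */
	static int sketch_probe(struct module *owner)
	{
		int err;

		if (!try_module_get(owner))
			return -ENODEV;

		err = sketch_setup();
		module_put(owner);		/* dropped on success and failure alike */
		return err;
	}
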
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e05557d52999..c579dcc9200c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
 	return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+	INIT_LIST_HEAD(&work->node);
+	work->fn = fn;
+	init_waitqueue_head(&work->done);
+	work->flushing = 0;
+	work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
-	struct vhost_work *work = &poll->work;
-
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 
-	INIT_LIST_HEAD(&work->node);
-	work->fn = fn;
-	init_waitqueue_head(&work->done);
-	work->flushing = 0;
-	work->queue_seq = work->done_seq = 0;
+	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
 	remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
-	struct vhost_work *work = &poll->work;
 	unsigned seq;
 	int left;
 	int flushing;
 
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, ({
-		   spin_lock_irq(&poll->dev->work_lock);
+		   spin_lock_irq(&dev->work_lock);
 		   left = seq - work->done_seq <= 0;
-		   spin_unlock_irq(&poll->dev->work_lock);
+		   spin_unlock_irq(&dev->work_lock);
 		   left;
 	}));
-	spin_lock_irq(&poll->dev->work_lock);
+	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
-	spin_unlock_irq(&poll->dev->work_lock);
+	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+	vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+				    struct vhost_work *work)
 {
-	struct vhost_dev *dev = poll->dev;
-	struct vhost_work *work = &poll->work;
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+	vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+	struct vhost_work work;
+	struct task_struct *owner;
+	int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+	struct vhost_attach_cgroups_struct *s;
+	s = container_of(work, struct vhost_attach_cgroups_struct, work);
+	s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+	struct vhost_attach_cgroups_struct attach;
+	attach.owner = current;
+	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+	vhost_work_queue(dev, &attach.work);
+	vhost_work_flush(dev, &attach.work);
+	return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	}
 
 	dev->worker = worker;
-	err = cgroup_attach_task_current_cg(worker);
+	wake_up_process(worker);	/* avoid contributing to loadavg */
+
+	err = vhost_attach_cgroups(dev);
 	if (err)
 		goto err_cgroup;
-	wake_up_process(worker);	/* avoid contributing to loadavg */
 
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
+	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
@@ -323,7 +359,10 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 	dev->mm = NULL;
 
 	WARN_ON(!list_empty(&dev->work_list));
-	kthread_stop(dev->worker);
+	if (dev->worker) {
+		kthread_stop(dev->worker);
+		dev->worker = NULL;
+	}
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
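
The net effect of the vhost refactor is a reusable synchronous hand-off: initialise a vhost_work on the stack, queue it, flush it, and the function has run to completion on the worker thread, with current pointing at the worker (which is why cgroup attachment is now correct). vhost_attach_cgroups() above is exactly this shape; a minimal restatement of the pattern, with my_sync_work as an illustrative name:

	/* Runs my_sync_fn on dev->worker and waits; mirrors vhost_attach_cgroups(). */
	struct my_sync_work {
		struct vhost_work work;
		int ret;
	};

	static void my_sync_fn(struct vhost_work *work)
	{
		struct my_sync_work *s =
			container_of(work, struct my_sync_work, work);
		s->ret = 0;	/* executes with current == dev->worker */
	}

	static int run_on_worker(struct vhost_dev *dev)
	{
		struct my_sync_work s = { .ret = -1 };

		vhost_work_init(&s.work, my_sync_fn);
		vhost_work_queue(dev, &s.work);
		vhost_work_flush(dev, &s.work);	/* stack storage is safe: we wait here */
		return s.ret;
	}
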
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index afe21e6eb544..1c2c68356ea7 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -80,7 +80,10 @@ static void clcdfb_disable(struct clcd_fb *fb)
 	/*
 	 * Disable CLCD clock source.
 	 */
-	clk_disable(fb->clk);
+	if (fb->clk_enabled) {
+		fb->clk_enabled = false;
+		clk_disable(fb->clk);
+	}
 }
 
 static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
@@ -88,7 +91,10 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
 	/*
 	 * Enable the CLCD clock source.
 	 */
-	clk_enable(fb->clk);
+	if (!fb->clk_enabled) {
+		fb->clk_enabled = true;
+		clk_enable(fb->clk);
+	}
 
 	/*
 	 * Bring up by first enabling..
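
The two amba-clcd hunks guard the clock calls with a flag so the enable/disable pair can no longer run unbalanced; an extra clk_disable() would underflow the clock's reference count. The same guard as a standalone sketch, assuming the bool clk_enabled field this patch adds to struct clcd_fb:

	/* Only ever hold (and release) a single clock reference. */
	static void sketch_clk_on(struct clcd_fb *fb)
	{
		if (!fb->clk_enabled) {
			fb->clk_enabled = true;
			clk_enable(fb->clk);
		}
	}

	static void sketch_clk_off(struct clcd_fb *fb)
	{
		if (fb->clk_enabled) {
			fb->clk_enabled = false;
			clk_disable(fb->clk);
		}
	}
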
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index f3a4e15672d9..f96a471cb1a8 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) {
 static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) {
 #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__)
 	/*
-	 * memcpy_toio works for us if:
+	 * iowrite32_rep works for us if:
 	 *  (1) Copies data as 32bit quantities, not byte after byte,
 	 *  (2) Performs LE ordered stores, and
 	 *  (3) It copes with unaligned source (destination is guaranteed to be page
 	 *      aligned and length is guaranteed to be multiple of 4).
 	 */
-	memcpy_toio(va.vaddr, src, len);
+	iowrite32_rep(va.vaddr, src, len >> 2);
 #else
 	u_int32_t __iomem* addr = va.vaddr;
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 72f91bff29c7..13365ba35218 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -112,6 +112,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
 #define VALID_EVTCHN(chn)	((chn) != 0)
 
 static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
 
 /* Constructor for packed IRQ information. */
 static struct irq_info mk_unbound_info(void)
@@ -377,7 +378,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	irq = find_unbound_irq();
 
 	set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-				      handle_level_irq, "event");
+				      handle_edge_irq, "event");
 
 	evtchn_to_irq[evtchn] = irq;
 	irq_info[irq] = mk_evtchn_info(evtchn);
@@ -403,8 +404,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	if (irq < 0)
 		goto out;
 
-	set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-				      handle_level_irq, "ipi");
+	set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+				      handle_percpu_irq, "ipi");
 
 	bind_ipi.vcpu = cpu;
 	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -444,8 +445,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
 	irq = find_unbound_irq();
 
-	set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
-				      handle_level_irq, "virq");
+	set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+				      handle_percpu_irq, "virq");
 
 	evtchn_to_irq[evtchn] = irq;
 	irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -964,6 +965,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.retrigger	= retrigger_dynirq,
 };
 
+static struct irq_chip xen_percpu_chip __read_mostly = {
+	.name		= "xen-percpu",
+
+	.disable	= disable_dynirq,
+	.mask		= disable_dynirq,
+	.unmask		= enable_dynirq,
+
+	.ack		= ack_dynirq,
+};
+
 int xen_set_callback_via(uint64_t via)
 {
 	struct xen_hvm_param a;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1799bd890315..ef9c7db52077 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -237,7 +237,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 		goto again;
 
 	if (sysrq_key != '\0')
-		handle_sysrq(sysrq_key, NULL);
+		handle_sysrq(sysrq_key);
 }
 
 static struct xenbus_watch sysrq_watch = {