aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/ac.c4
-rw-r--r--drivers/acpi/acpica/achware.h12
-rw-r--r--drivers/acpi/acpica/hwesleep.c19
-rw-r--r--drivers/acpi/acpica/hwsleep.c20
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c22
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/numa.c12
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/processor_driver.c2
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sleep.c75
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c4
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/common.c4
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/bcma/sprom.c4
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/char/agp/intel-agp.h39
-rw-r--r--drivers/char/agp/intel-gtt.c60
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c1
-rw-r--r--drivers/dma/imx-dma.c36
-rw-r--r--drivers/dma/tegra20-apb-dma.c18
-rw-r--r--drivers/gpio/gpio-langwell.c7
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-mxc.c5
-rw-r--r--drivers/gpio/gpio-pxa.c30
-rw-r--r--drivers/gpio/gpio-samsung.c14
-rw-r--r--drivers/gpio/gpio-sch.c3
-rw-r--r--drivers/gpu/drm/drm_edid_load.c8
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c31
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c20
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c3
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c12
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c14
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h20
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c10
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c15
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c48
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c37
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c22
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c71
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h2
-rw-r--r--drivers/gpu/drm/radeon/ni.c14
-rw-r--r--drivers/gpu/drm/radeon/r600.c20
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c65
-rw-r--r--drivers/gpu/drm/radeon/r600d.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/rv515.c13
-rw-r--r--drivers/gpu/drm/radeon/si.c35
-rw-r--r--drivers/gpu/drm/radeon/sid.h3
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/iommu/amd_iommu.c25
-rw-r--r--drivers/iommu/amd_iommu_init.c6
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel-iommu.c26
-rw-r--r--drivers/iommu/tegra-smmu.c17
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c12
-rw-r--r--drivers/isdn/mISDN/layer2.c2
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/leds/leds-lp8788.c2
-rw-r--r--drivers/leds/leds-renesas-tpu.c2
-rw-r--r--drivers/mtd/maps/uclinux.c5
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/appletalk/ltpc.c4
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c72
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c36
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c14
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/sfc/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c16
-rw-r--r--drivers/net/ethernet/sfc/tx.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/rndis_filter.c11
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/macvtap.c3
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/tun.c1
-rw-r--r--drivers/net/usb/cdc-phonet.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c13
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/main.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c3
-rw-r--r--drivers/pinctrl/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c1
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c8
-rw-r--r--drivers/platform/x86/classmate-laptop.c4
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c2
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c4
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c12
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/xo15-ebook.c2
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-cmos.c1
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/sh/intc/core.c27
-rw-r--r--drivers/usb/early/ehci-dbgp.c2
-rw-r--r--drivers/vhost/Kconfig3
-rw-r--r--drivers/vhost/Kconfig.tcm6
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/tcm_vhost.c1628
-rw-r--r--drivers/vhost/tcm_vhost.h101
-rw-r--r--drivers/zorro/zorro.c2
168 files changed, 2956 insertions, 774 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index ac7034129f3f..d5fdd36190cc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
69}; 69};
70MODULE_DEVICE_TABLE(acpi, ac_device_ids); 70MODULE_DEVICE_TABLE(acpi, ac_device_ids);
71 71
72#ifdef CONFIG_PM_SLEEP
72static int acpi_ac_resume(struct device *dev); 73static int acpi_ac_resume(struct device *dev);
74#endif
73static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); 75static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
74 76
75static struct acpi_driver acpi_ac_driver = { 77static struct acpi_driver acpi_ac_driver = {
@@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device)
313 return result; 315 return result;
314} 316}
315 317
318#ifdef CONFIG_PM_SLEEP
316static int acpi_ac_resume(struct device *dev) 319static int acpi_ac_resume(struct device *dev)
317{ 320{
318 struct acpi_ac *ac; 321 struct acpi_ac *ac;
@@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev)
332 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); 335 kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
333 return 0; 336 return 0;
334} 337}
338#endif
335 339
336static int acpi_ac_remove(struct acpi_device *device, int type) 340static int acpi_ac_remove(struct acpi_device *device, int type)
337{ 341{
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
83/* 83/*
84 * hwsleep - sleep/wake support (Legacy sleep registers) 84 * hwsleep - sleep/wake support (Legacy sleep registers)
85 */ 85 */
86acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags); 86acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
87 87
88acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags); 88acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
89 89
90acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags); 90acpi_status acpi_hw_legacy_wake(u8 sleep_state);
91 91
92/* 92/*
93 * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers) 93 * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
94 */ 94 */
95void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument); 95void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
96 96
97acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags); 97acpi_status acpi_hw_extended_sleep(u8 sleep_state);
98 98
99acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags); 99acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
100 100
101acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags); 101acpi_status acpi_hw_extended_wake(u8 sleep_state);
102 102
103/* 103/*
104 * hwvalid - Port I/O with validation 104 * hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 48518dac5342..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
90 * FUNCTION: acpi_hw_extended_sleep 90 * FUNCTION: acpi_hw_extended_sleep
91 * 91 *
92 * PARAMETERS: sleep_state - Which sleep state to enter 92 * PARAMETERS: sleep_state - Which sleep state to enter
93 * flags - ACPI_EXECUTE_GTS to run optional method
94 * 93 *
95 * RETURN: Status 94 * RETURN: Status
96 * 95 *
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
100 * 99 *
101 ******************************************************************************/ 100 ******************************************************************************/
102 101
103acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) 102acpi_status acpi_hw_extended_sleep(u8 sleep_state)
104{ 103{
105 acpi_status status; 104 acpi_status status;
106 u8 sleep_type_value; 105 u8 sleep_type_value;
@@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
125 124
126 acpi_gbl_system_awake_and_running = FALSE; 125 acpi_gbl_system_awake_and_running = FALSE;
127 126
128 /* Optionally execute _GTS (Going To Sleep) */
129
130 if (flags & ACPI_EXECUTE_GTS) {
131 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
132 }
133
134 /* Flush caches, as per ACPI specification */ 127 /* Flush caches, as per ACPI specification */
135 128
136 ACPI_FLUSH_CPU_CACHE(); 129 ACPI_FLUSH_CPU_CACHE();
@@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
172 * FUNCTION: acpi_hw_extended_wake_prep 165 * FUNCTION: acpi_hw_extended_wake_prep
173 * 166 *
174 * PARAMETERS: sleep_state - Which sleep state we just exited 167 * PARAMETERS: sleep_state - Which sleep state we just exited
175 * flags - ACPI_EXECUTE_BFS to run optional method
176 * 168 *
177 * RETURN: Status 169 * RETURN: Status
178 * 170 *
@@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
181 * 173 *
182 ******************************************************************************/ 174 ******************************************************************************/
183 175
184acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) 176acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
185{ 177{
186 acpi_status status; 178 acpi_status status;
187 u8 sleep_type_value; 179 u8 sleep_type_value;
@@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
200 &acpi_gbl_FADT.sleep_control); 192 &acpi_gbl_FADT.sleep_control);
201 } 193 }
202 194
203 /* Optionally execute _BFS (Back From Sleep) */
204
205 if (flags & ACPI_EXECUTE_BFS) {
206 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
207 }
208 return_ACPI_STATUS(AE_OK); 195 return_ACPI_STATUS(AE_OK);
209} 196}
210 197
@@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
222 * 209 *
223 ******************************************************************************/ 210 ******************************************************************************/
224 211
225acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) 212acpi_status acpi_hw_extended_wake(u8 sleep_state)
226{ 213{
227 ACPI_FUNCTION_TRACE(hw_extended_wake); 214 ACPI_FUNCTION_TRACE(hw_extended_wake);
228 215
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 9960fe9ef533..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
56 * FUNCTION: acpi_hw_legacy_sleep 56 * FUNCTION: acpi_hw_legacy_sleep
57 * 57 *
58 * PARAMETERS: sleep_state - Which sleep state to enter 58 * PARAMETERS: sleep_state - Which sleep state to enter
59 * flags - ACPI_EXECUTE_GTS to run optional method
60 * 59 *
61 * RETURN: Status 60 * RETURN: Status
62 * 61 *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
64 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 63 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
65 * 64 *
66 ******************************************************************************/ 65 ******************************************************************************/
67acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) 66acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
68{ 67{
69 struct acpi_bit_register_info *sleep_type_reg_info; 68 struct acpi_bit_register_info *sleep_type_reg_info;
70 struct acpi_bit_register_info *sleep_enable_reg_info; 69 struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
110 return_ACPI_STATUS(status); 109 return_ACPI_STATUS(status);
111 } 110 }
112 111
113 /* Optionally execute _GTS (Going To Sleep) */
114
115 if (flags & ACPI_EXECUTE_GTS) {
116 acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
117 }
118
119 /* Get current value of PM1A control */ 112 /* Get current value of PM1A control */
120 113
121 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, 114 status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
214 * FUNCTION: acpi_hw_legacy_wake_prep 207 * FUNCTION: acpi_hw_legacy_wake_prep
215 * 208 *
216 * PARAMETERS: sleep_state - Which sleep state we just exited 209 * PARAMETERS: sleep_state - Which sleep state we just exited
217 * flags - ACPI_EXECUTE_BFS to run optional method
218 * 210 *
219 * RETURN: Status 211 * RETURN: Status
220 * 212 *
@@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
224 * 216 *
225 ******************************************************************************/ 217 ******************************************************************************/
226 218
227acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) 219acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
228{ 220{
229 acpi_status status; 221 acpi_status status;
230 struct acpi_bit_register_info *sleep_type_reg_info; 222 struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
275 } 267 }
276 } 268 }
277 269
278 /* Optionally execute _BFS (Back From Sleep) */
279
280 if (flags & ACPI_EXECUTE_BFS) {
281 acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
282 }
283 return_ACPI_STATUS(status); 270 return_ACPI_STATUS(status);
284} 271}
285 272
@@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
288 * FUNCTION: acpi_hw_legacy_wake 275 * FUNCTION: acpi_hw_legacy_wake
289 * 276 *
290 * PARAMETERS: sleep_state - Which sleep state we just exited 277 * PARAMETERS: sleep_state - Which sleep state we just exited
291 * flags - Reserved, set to zero
292 * 278 *
293 * RETURN: Status 279 * RETURN: Status
294 * 280 *
@@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
297 * 283 *
298 ******************************************************************************/ 284 ******************************************************************************/
299 285
300acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) 286acpi_status acpi_hw_legacy_wake(u8 sleep_state)
301{ 287{
302 acpi_status status; 288 acpi_status status;
303 289
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index f8684bfe7907..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
50 50
51/* Local prototypes */ 51/* Local prototypes */
52static acpi_status 52static acpi_status
53acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); 53acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
54 54
55/* 55/*
56 * Dispatch table used to efficiently branch to the various sleep 56 * Dispatch table used to efficiently branch to the various sleep
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
235 * 235 *
236 ******************************************************************************/ 236 ******************************************************************************/
237static acpi_status 237static acpi_status
238acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) 238acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
239{ 239{
240 acpi_status status; 240 acpi_status status;
241 struct acpi_sleep_functions *sleep_functions = 241 struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
248 * use the extended sleep registers 248 * use the extended sleep registers
249 */ 249 */
250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { 250 if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
251 status = sleep_functions->extended_function(sleep_state, flags); 251 status = sleep_functions->extended_function(sleep_state);
252 } else { 252 } else {
253 /* Legacy sleep */ 253 /* Legacy sleep */
254 254
255 status = sleep_functions->legacy_function(sleep_state, flags); 255 status = sleep_functions->legacy_function(sleep_state);
256 } 256 }
257 257
258 return (status); 258 return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
262 * For the case where reduced-hardware-only code is being generated, 262 * For the case where reduced-hardware-only code is being generated,
263 * we know that only the extended sleep registers are available 263 * we know that only the extended sleep registers are available
264 */ 264 */
265 status = sleep_functions->extended_function(sleep_state, flags); 265 status = sleep_functions->extended_function(sleep_state);
266 return (status); 266 return (status);
267 267
268#endif /* !ACPI_REDUCED_HARDWARE */ 268#endif /* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
349 * FUNCTION: acpi_enter_sleep_state 349 * FUNCTION: acpi_enter_sleep_state
350 * 350 *
351 * PARAMETERS: sleep_state - Which sleep state to enter 351 * PARAMETERS: sleep_state - Which sleep state to enter
352 * flags - ACPI_EXECUTE_GTS to run optional method
353 * 352 *
354 * RETURN: Status 353 * RETURN: Status
355 * 354 *
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
357 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED 356 * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
358 * 357 *
359 ******************************************************************************/ 358 ******************************************************************************/
360acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) 359acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
361{ 360{
362 acpi_status status; 361 acpi_status status;
363 362
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
371 } 370 }
372 371
373 status = 372 status =
374 acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); 373 acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
375 return_ACPI_STATUS(status); 374 return_ACPI_STATUS(status);
376} 375}
377 376
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
391 * Called with interrupts DISABLED. 390 * Called with interrupts DISABLED.
392 * 391 *
393 ******************************************************************************/ 392 ******************************************************************************/
394acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) 393acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
395{ 394{
396 acpi_status status; 395 acpi_status status;
397 396
398 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); 397 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
399 398
400 status = 399 status =
401 acpi_hw_sleep_dispatch(sleep_state, flags, 400 acpi_hw_sleep_dispatch(sleep_state,
402 ACPI_WAKE_PREP_FUNCTION_ID); 401 ACPI_WAKE_PREP_FUNCTION_ID);
403 return_ACPI_STATUS(status); 402 return_ACPI_STATUS(status);
404} 403}
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
423 422
424 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); 423 ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
425 424
426 425 status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
427 status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
428 return_ACPI_STATUS(status); 426 return_ACPI_STATUS(status);
429} 427}
430 428
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index ff2c876ec412..45e3e1759fb8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
1052 return 0; 1052 return 0;
1053} 1053}
1054 1054
1055#ifdef CONFIG_PM_SLEEP
1055/* this is needed to learn about changes made in suspended state */ 1056/* this is needed to learn about changes made in suspended state */
1056static int acpi_battery_resume(struct device *dev) 1057static int acpi_battery_resume(struct device *dev)
1057{ 1058{
@@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev)
1068 acpi_battery_update(battery); 1069 acpi_battery_update(battery);
1069 return 0; 1070 return 0;
1070} 1071}
1072#endif
1071 1073
1072static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); 1074static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
1073 1075
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 79d4c22f7a6d..314a3b84bbc7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device);
78static int acpi_button_remove(struct acpi_device *device, int type); 78static int acpi_button_remove(struct acpi_device *device, int type);
79static void acpi_button_notify(struct acpi_device *device, u32 event); 79static void acpi_button_notify(struct acpi_device *device, u32 event);
80 80
81#ifdef CONFIG_PM_SLEEP
81static int acpi_button_resume(struct device *dev); 82static int acpi_button_resume(struct device *dev);
83#endif
82static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); 84static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
83 85
84static struct acpi_driver acpi_button_driver = { 86static struct acpi_driver acpi_button_driver = {
@@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
310 } 312 }
311} 313}
312 314
315#ifdef CONFIG_PM_SLEEP
313static int acpi_button_resume(struct device *dev) 316static int acpi_button_resume(struct device *dev)
314{ 317{
315 struct acpi_device *device = to_acpi_device(dev); 318 struct acpi_device *device = to_acpi_device(dev);
@@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev)
319 return acpi_lid_send_state(device); 322 return acpi_lid_send_state(device);
320 return 0; 323 return 0;
321} 324}
325#endif
322 326
323static int acpi_button_add(struct acpi_device *device) 327static int acpi_button_add(struct acpi_device *device)
324{ 328{
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 669d9ee80d16..bc36a476f1ab 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
53}; 53};
54MODULE_DEVICE_TABLE(acpi, fan_device_ids); 54MODULE_DEVICE_TABLE(acpi, fan_device_ids);
55 55
56#ifdef CONFIG_PM_SLEEP
56static int acpi_fan_suspend(struct device *dev); 57static int acpi_fan_suspend(struct device *dev);
57static int acpi_fan_resume(struct device *dev); 58static int acpi_fan_resume(struct device *dev);
59#endif
58static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); 60static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
59 61
60static struct acpi_driver acpi_fan_driver = { 62static struct acpi_driver acpi_fan_driver = {
@@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
184 return 0; 186 return 0;
185} 187}
186 188
189#ifdef CONFIG_PM_SLEEP
187static int acpi_fan_suspend(struct device *dev) 190static int acpi_fan_suspend(struct device *dev)
188{ 191{
189 if (!dev) 192 if (!dev)
@@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev)
207 210
208 return result; 211 return result;
209} 212}
213#endif
210 214
211static int __init acpi_fan_init(void) 215static int __init acpi_fan_init(void)
212{ 216{
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
237 return 0; 237 return 0;
238} 238}
239 239
240static int __initdata parsed_numa_memblks;
241
240static int __init 242static int __init
241acpi_parse_memory_affinity(struct acpi_subtable_header * header, 243acpi_parse_memory_affinity(struct acpi_subtable_header * header,
242 const unsigned long end) 244 const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
250 acpi_table_print_srat_entry(header); 252 acpi_table_print_srat_entry(header);
251 253
252 /* let architecture-dependent part to do it */ 254 /* let architecture-dependent part to do it */
253 acpi_numa_memory_affinity_init(memory_affinity); 255 if (!acpi_numa_memory_affinity_init(memory_affinity))
254 256 parsed_numa_memblks++;
255 return 0; 257 return 0;
256} 258}
257 259
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
304 306
305 acpi_numa_arch_fixup(); 307 acpi_numa_arch_fixup();
306 308
307 if (cnt <= 0) 309 if (cnt < 0)
308 return cnt ?: -ENOENT; 310 return cnt;
311 else if (!parsed_numa_memblks)
312 return -ENOENT;
309 return 0; 313 return 0;
310} 314}
311 315
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c321c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
573 OSC_CLOCK_PWR_CAPABILITY_SUPPORT; 573 OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
574 if (pci_msi_enabled()) 574 if (pci_msi_enabled())
575 flags |= OSC_MSI_SUPPORT; 575 flags |= OSC_MSI_SUPPORT;
576 if (flags != base_flags) 576 if (flags != base_flags) {
577 acpi_pci_osc_support(root, flags); 577 status = acpi_pci_osc_support(root, flags);
578 if (ACPI_FAILURE(status)) {
579 dev_info(root->bus->bridge, "ACPI _OSC support "
580 "notification failed, disabling PCIe ASPM\n");
581 pcie_no_aspm();
582 flags = base_flags;
583 }
584 }
578 585
579 if (!pcie_ports_disabled 586 if (!pcie_ports_disabled
580 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { 587 && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 215ecd097408..fc1803414629 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
67}; 67};
68MODULE_DEVICE_TABLE(acpi, power_device_ids); 68MODULE_DEVICE_TABLE(acpi, power_device_ids);
69 69
70#ifdef CONFIG_PM_SLEEP
70static int acpi_power_resume(struct device *dev); 71static int acpi_power_resume(struct device *dev);
72#endif
71static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); 73static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
72 74
73static struct acpi_driver acpi_power_driver = { 75static struct acpi_driver acpi_power_driver = {
@@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type)
775 return 0; 777 return 0;
776} 778}
777 779
780#ifdef CONFIG_PM_SLEEP
778static int acpi_power_resume(struct device *dev) 781static int acpi_power_resume(struct device *dev)
779{ 782{
780 int result = 0, state; 783 int result = 0, state;
@@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev)
803 806
804 return result; 807 return result;
805} 808}
809#endif
806 810
807int __init acpi_power_init(void) 811int __init acpi_power_init(void)
808{ 812{
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index ff8e04f2fab4..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
437 /* Normal CPU soft online event */ 437 /* Normal CPU soft online event */
438 } else { 438 } else {
439 acpi_processor_ppc_has_changed(pr, 0); 439 acpi_processor_ppc_has_changed(pr, 0);
440 acpi_processor_cst_has_changed(pr); 440 acpi_processor_hotplug(pr);
441 acpi_processor_reevaluate_tstate(pr, action); 441 acpi_processor_reevaluate_tstate(pr, action);
442 acpi_processor_tstate_has_changed(pr); 442 acpi_processor_tstate_has_changed(pr);
443 } 443 }
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c0b9aa5faf4c..ff0740e0a9c2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void)
988#endif 988#endif
989} 989}
990 990
991#ifdef CONFIG_PM_SLEEP
991static int acpi_sbs_resume(struct device *dev) 992static int acpi_sbs_resume(struct device *dev)
992{ 993{
993 struct acpi_sbs *sbs; 994 struct acpi_sbs *sbs;
@@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev)
997 acpi_sbs_callback(sbs); 998 acpi_sbs_callback(sbs);
998 return 0; 999 return 0;
999} 1000}
1001#endif
1000 1002
1001static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); 1003static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
1002 1004
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 7a7a9c929247..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
28#include "internal.h" 28#include "internal.h"
29#include "sleep.h" 29#include "sleep.h"
30 30
31u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
32static unsigned int gts, bfs;
33static int set_param_wake_flag(const char *val, struct kernel_param *kp)
34{
35 int ret = param_set_int(val, kp);
36
37 if (ret)
38 return ret;
39
40 if (kp->arg == (const char *)&gts) {
41 if (gts)
42 wake_sleep_flags |= ACPI_EXECUTE_GTS;
43 else
44 wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
45 }
46 if (kp->arg == (const char *)&bfs) {
47 if (bfs)
48 wake_sleep_flags |= ACPI_EXECUTE_BFS;
49 else
50 wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
51 }
52 return ret;
53}
54module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
55module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
56MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
58
59static u8 sleep_states[ACPI_S_STATE_COUNT]; 31static u8 sleep_states[ACPI_S_STATE_COUNT];
60static bool pwr_btn_event_pending;
61 32
62static void acpi_sleep_tts_switch(u32 acpi_state) 33static void acpi_sleep_tts_switch(u32 acpi_state)
63{ 34{
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
110 81
111#ifdef CONFIG_ACPI_SLEEP 82#ifdef CONFIG_ACPI_SLEEP
112static u32 acpi_target_sleep_state = ACPI_STATE_S0; 83static u32 acpi_target_sleep_state = ACPI_STATE_S0;
84static bool pwr_btn_event_pending;
113 85
114/* 86/*
115 * The ACPI specification wants us to save NVS memory regions during hibernation 87 * The ACPI specification wants us to save NVS memory regions during hibernation
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
305 switch (acpi_state) { 277 switch (acpi_state) {
306 case ACPI_STATE_S1: 278 case ACPI_STATE_S1:
307 barrier(); 279 barrier();
308 status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); 280 status = acpi_enter_sleep_state(acpi_state);
309 break; 281 break;
310 282
311 case ACPI_STATE_S3: 283 case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
319 /* This violates the spec but is required for bug compatibility. */ 291 /* This violates the spec but is required for bug compatibility. */
320 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); 292 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
321 293
322 /* Reprogram control registers and execute _BFS */ 294 /* Reprogram control registers */
323 acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); 295 acpi_leave_sleep_state_prep(acpi_state);
324 296
325 /* ACPI 3.0 specs (P62) says that it's the responsibility 297 /* ACPI 3.0 specs (P62) says that it's the responsibility
326 * of the OSPM to clear the status bit [ implying that the 298 * of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
603 ACPI_FLUSH_CPU_CACHE(); 575 ACPI_FLUSH_CPU_CACHE();
604 576
605 /* This shouldn't return. If it returns, we have a problem */ 577 /* This shouldn't return. If it returns, we have a problem */
606 status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); 578 status = acpi_enter_sleep_state(ACPI_STATE_S4);
607 /* Reprogram control registers and execute _BFS */ 579 /* Reprogram control registers */
608 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 580 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
609 581
610 return ACPI_SUCCESS(status) ? 0 : -EFAULT; 582 return ACPI_SUCCESS(status) ? 0 : -EFAULT;
611} 583}
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
617 * enable it here. 589 * enable it here.
618 */ 590 */
619 acpi_enable(); 591 acpi_enable();
620 /* Reprogram control registers and execute _BFS */ 592 /* Reprogram control registers */
621 acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); 593 acpi_leave_sleep_state_prep(ACPI_STATE_S4);
622 /* Check the hardware signature */ 594 /* Check the hardware signature */
623 if (facs && s4_hardware_signature != facs->hardware_signature) { 595 if (facs && s4_hardware_signature != facs->hardware_signature) {
624 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " 596 printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@ static void acpi_power_off(void)
892 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ 864 /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
893 printk(KERN_DEBUG "%s called\n", __func__); 865 printk(KERN_DEBUG "%s called\n", __func__);
894 local_irq_disable(); 866 local_irq_disable();
895 acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); 867 acpi_enter_sleep_state(ACPI_STATE_S5);
896}
897
898/*
899 * ACPI 2.0 created the optional _GTS and _BFS,
900 * but industry adoption has been neither rapid nor broad.
901 *
902 * Linux gets into trouble when it executes poorly validated
903 * paths through the BIOS, so disable _GTS and _BFS by default,
904 * but do speak up and offer the option to enable them.
905 */
906static void __init acpi_gts_bfs_check(void)
907{
908 acpi_handle dummy;
909
910 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
911 {
912 printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
913 printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
914 "please notify linux-acpi@vger.kernel.org\n");
915 }
916 if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
917 {
918 printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
919 printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
920 "please notify linux-acpi@vger.kernel.org\n");
921 }
922} 868}
923 869
924int __init acpi_sleep_init(void) 870int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@ int __init acpi_sleep_init(void)
979 * object can also be evaluated when the system enters S5. 925 * object can also be evaluated when the system enters S5.
980 */ 926 */
981 register_reboot_notifier(&tts_notifier); 927 register_reboot_notifier(&tts_notifier);
982 acpi_gts_bfs_check();
983 return 0; 928 return 0;
984} 929}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a24400976..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
173{ 173{
174 int result = 0; 174 int result = 0;
175 175
176 if (!strncmp(val, "enable", strlen("enable"))) { 176 if (!strncmp(val, "enable", sizeof("enable") - 1)) {
177 result = acpi_debug_trace(trace_method_name, trace_debug_level, 177 result = acpi_debug_trace(trace_method_name, trace_debug_level,
178 trace_debug_layer, 0); 178 trace_debug_layer, 0);
179 if (result) 179 if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
181 goto exit; 181 goto exit;
182 } 182 }
183 183
184 if (!strncmp(val, "disable", strlen("disable"))) { 184 if (!strncmp(val, "disable", sizeof("disable") - 1)) {
185 int name = 0; 185 int name = 0;
186 result = acpi_debug_trace((char *)&name, trace_debug_level, 186 result = acpi_debug_trace((char *)&name, trace_debug_level,
187 trace_debug_layer, 0); 187 trace_debug_layer, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 9fe90e9fecb5..edda74a43406 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = {
106}; 106};
107MODULE_DEVICE_TABLE(acpi, thermal_device_ids); 107MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
108 108
109#ifdef CONFIG_PM_SLEEP
109static int acpi_thermal_resume(struct device *dev); 110static int acpi_thermal_resume(struct device *dev);
111#endif
110static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); 112static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
111 113
112static struct acpi_driver acpi_thermal_driver = { 114static struct acpi_driver acpi_thermal_driver = {
@@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
1041 return 0; 1043 return 0;
1042} 1044}
1043 1045
1046#ifdef CONFIG_PM_SLEEP
1044static int acpi_thermal_resume(struct device *dev) 1047static int acpi_thermal_resume(struct device *dev)
1045{ 1048{
1046 struct acpi_thermal *tz; 1049 struct acpi_thermal *tz;
@@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev)
1075 1078
1076 return AE_OK; 1079 return AE_OK;
1077} 1080}
1081#endif
1078 1082
1079static int thermal_act(const struct dmi_system_id *d) { 1083static int thermal_act(const struct dmi_system_id *d) {
1080 1084
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
2362 { 2362 {
2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n", 2363 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2364 dev->number); 2364 dev->number);
2365 return error; 2365 return -ENOMEM;
2366 } 2366 }
2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", 2367 IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2368 dev->number, iadev->pci->revision, base, iadev->irq);) 2368 dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff2227f..eb78e9640c4a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev)
169 */ 169 */
170int pm_clk_create(struct device *dev) 170int pm_clk_create(struct device *dev)
171{ 171{
172 int ret = dev_pm_get_subsys_data(dev); 172 return dev_pm_get_subsys_data(dev);
173 return ret < 0 ? ret : 0;
174} 173}
175 174
176/** 175/**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085cc613f..39c32529b833 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
24int dev_pm_get_subsys_data(struct device *dev) 24int dev_pm_get_subsys_data(struct device *dev)
25{ 25{
26 struct pm_subsys_data *psd; 26 struct pm_subsys_data *psd;
27 int ret = 0;
28 27
29 psd = kzalloc(sizeof(*psd), GFP_KERNEL); 28 psd = kzalloc(sizeof(*psd), GFP_KERNEL);
30 if (!psd) 29 if (!psd)
@@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev)
40 dev->power.subsys_data = psd; 39 dev->power.subsys_data = psd;
41 pm_clk_init(dev); 40 pm_clk_init(dev);
42 psd = NULL; 41 psd = NULL;
43 ret = 1;
44 } 42 }
45 43
46 spin_unlock_irq(&dev->power.lock); 44 spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev)
48 /* kfree() verifies that its argument is nonzero. */ 46 /* kfree() verifies that its argument is nonzero. */
49 kfree(psd); 47 kfree(psd);
50 48
51 return ret; 49 return 0;
52} 50}
53EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); 51EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
54 52
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2642df..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 272 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 273 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 274 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
275 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
276 { 0, }, 277 { 0, },
277}; 278};
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d97fd9f..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
507 /* for these chips OTP is always available */ 507 /* for these chips OTP is always available */
508 present = true; 508 present = true;
509 break; 509 break;
510 510 case BCMA_CHIP_ID_BCM43228:
511 present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
512 break;
511 default: 513 default:
512 present = false; 514 present = false;
513 break; 515 break;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 2e0e7fc1dbba..dbe6135a2abe 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -3537,9 +3537,9 @@ static void drbd_cleanup(void)
3537} 3537}
3538 3538
3539/** 3539/**
3540 * drbd_congested() - Callback for pdflush 3540 * drbd_congested() - Callback for the flusher thread
3541 * @congested_data: User data 3541 * @congested_data: User data
3542 * @bdi_bits: Bits pdflush is currently interested in 3542 * @bdi_bits: Bits the BDI flusher thread is currently interested in
3543 * 3543 *
3544 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested. 3544 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3545 */ 3545 */
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 57226424690c..6f007b6c240d 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -239,16 +239,45 @@
239#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A 239#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
240#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ 240#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
241#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 241#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
242#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ 242#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
243#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 243#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
244#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 244#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
245#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ 245#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
246#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
246#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 247#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
247#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 248#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
248#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ 249#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
250#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
249#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a 251#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
250#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a 252#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
251#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ 253#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
252#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 254#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
255#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
256#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
257#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
258#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
259#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
260#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
261#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
262#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
263#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
264#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
265#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
266#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
267#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
268#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
269#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
270#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
271#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
272#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
273#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
274#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
275#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
276#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
277#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
278#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
279#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
280#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
281#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
253 282
254#endif 283#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9ed92ef5829b..08fc5cbb13cd 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description {
1502 "Haswell", &sandybridge_gtt_driver }, 1502 "Haswell", &sandybridge_gtt_driver },
1503 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, 1503 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1504 "Haswell", &sandybridge_gtt_driver }, 1504 "Haswell", &sandybridge_gtt_driver },
1505 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
1506 "Haswell", &sandybridge_gtt_driver },
1505 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, 1507 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1506 "Haswell", &sandybridge_gtt_driver }, 1508 "Haswell", &sandybridge_gtt_driver },
1507 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, 1509 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1508 "Haswell", &sandybridge_gtt_driver }, 1510 "Haswell", &sandybridge_gtt_driver },
1511 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
1512 "Haswell", &sandybridge_gtt_driver },
1509 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, 1513 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1510 "Haswell", &sandybridge_gtt_driver }, 1514 "Haswell", &sandybridge_gtt_driver },
1511 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, 1515 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1512 "Haswell", &sandybridge_gtt_driver }, 1516 "Haswell", &sandybridge_gtt_driver },
1513 { PCI_DEVICE_ID_INTEL_HASWELL_SDV, 1517 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
1518 "Haswell", &sandybridge_gtt_driver },
1519 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
1520 "Haswell", &sandybridge_gtt_driver },
1521 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
1522 "Haswell", &sandybridge_gtt_driver },
1523 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
1524 "Haswell", &sandybridge_gtt_driver },
1525 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
1526 "Haswell", &sandybridge_gtt_driver },
1527 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
1528 "Haswell", &sandybridge_gtt_driver },
1529 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
1530 "Haswell", &sandybridge_gtt_driver },
1531 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
1532 "Haswell", &sandybridge_gtt_driver },
1533 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
1534 "Haswell", &sandybridge_gtt_driver },
1535 { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
1536 "Haswell", &sandybridge_gtt_driver },
1537 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
1538 "Haswell", &sandybridge_gtt_driver },
1539 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
1540 "Haswell", &sandybridge_gtt_driver },
1541 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
1542 "Haswell", &sandybridge_gtt_driver },
1543 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
1544 "Haswell", &sandybridge_gtt_driver },
1545 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
1546 "Haswell", &sandybridge_gtt_driver },
1547 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
1548 "Haswell", &sandybridge_gtt_driver },
1549 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
1550 "Haswell", &sandybridge_gtt_driver },
1551 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
1552 "Haswell", &sandybridge_gtt_driver },
1553 { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
1554 "Haswell", &sandybridge_gtt_driver },
1555 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
1556 "Haswell", &sandybridge_gtt_driver },
1557 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
1558 "Haswell", &sandybridge_gtt_driver },
1559 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
1560 "Haswell", &sandybridge_gtt_driver },
1561 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
1562 "Haswell", &sandybridge_gtt_driver },
1563 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
1564 "Haswell", &sandybridge_gtt_driver },
1565 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
1566 "Haswell", &sandybridge_gtt_driver },
1567 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
1568 "Haswell", &sandybridge_gtt_driver },
1569 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
1570 "Haswell", &sandybridge_gtt_driver },
1571 { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
1514 "Haswell", &sandybridge_gtt_driver }, 1572 "Haswell", &sandybridge_gtt_driver },
1515 { 0, NULL, NULL } 1573 { 0, NULL, NULL }
1516}; 1574};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 89682fa8801e..c4be3519a587 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
807MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); 807MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
808#endif 808#endif
809 809
810#ifdef CONFIG_PM_SLEEP
810static int tpm_tis_resume(struct device *dev) 811static int tpm_tis_resume(struct device *dev)
811{ 812{
812 struct tpm_chip *chip = dev_get_drvdata(dev); 813 struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev)
816 817
817 return tpm_pm_resume(dev); 818 return tpm_pm_resume(dev);
818} 819}
820#endif
819 821
820static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); 822static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
821 823
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac8f41a..503996a94a6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void)
454 mem_resource->address_length); 454 mem_resource->address_length);
455 if (pcch_virt_addr == NULL) { 455 if (pcch_virt_addr == NULL) {
456 pr_debug("probe: could not map shared mem region\n"); 456 pr_debug("probe: could not map shared mem region\n");
457 ret = -ENOMEM;
457 goto out_free; 458 goto out_free;
458 } 459 }
459 pcch_hdr = pcch_virt_addr; 460 pcch_hdr = pcch_virt_addr;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3cd8d31..5084975d793c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@ struct imxdma_engine {
172 struct device_dma_parameters dma_parms; 172 struct device_dma_parameters dma_parms;
173 struct dma_device dma_device; 173 struct dma_device dma_device;
174 void __iomem *base; 174 void __iomem *base;
175 struct clk *dma_clk; 175 struct clk *dma_ahb;
176 struct clk *dma_ipg;
176 spinlock_t lock; 177 spinlock_t lock;
177 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; 178 struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
178 struct imxdma_channel channel[IMX_DMA_CHANNELS]; 179 struct imxdma_channel channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
976 return 0; 977 return 0;
977 } 978 }
978 979
979 imxdma->dma_clk = clk_get(NULL, "dma"); 980 imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
980 if (IS_ERR(imxdma->dma_clk)) 981 if (IS_ERR(imxdma->dma_ipg)) {
981 return PTR_ERR(imxdma->dma_clk); 982 ret = PTR_ERR(imxdma->dma_ipg);
982 clk_enable(imxdma->dma_clk); 983 goto err_clk;
984 }
985
986 imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
987 if (IS_ERR(imxdma->dma_ahb)) {
988 ret = PTR_ERR(imxdma->dma_ahb);
989 goto err_clk;
990 }
991
992 clk_prepare_enable(imxdma->dma_ipg);
993 clk_prepare_enable(imxdma->dma_ahb);
983 994
984 /* reset DMA module */ 995 /* reset DMA module */
985 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); 996 imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
988 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); 999 ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
989 if (ret) { 1000 if (ret) {
990 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); 1001 dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
991 kfree(imxdma); 1002 goto err_enable;
992 return ret;
993 } 1003 }
994 1004
995 ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); 1005 ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
996 if (ret) { 1006 if (ret) {
997 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); 1007 dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
998 free_irq(MX1_DMA_INT, NULL); 1008 free_irq(MX1_DMA_INT, NULL);
999 kfree(imxdma); 1009 goto err_enable;
1000 return ret;
1001 } 1010 }
1002 } 1011 }
1003 1012
@@ -1094,7 +1103,10 @@ err_init:
1094 free_irq(MX1_DMA_INT, NULL); 1103 free_irq(MX1_DMA_INT, NULL);
1095 free_irq(MX1_DMA_ERR, NULL); 1104 free_irq(MX1_DMA_ERR, NULL);
1096 } 1105 }
1097 1106err_enable:
1107 clk_disable_unprepare(imxdma->dma_ipg);
1108 clk_disable_unprepare(imxdma->dma_ahb);
1109err_clk:
1098 kfree(imxdma); 1110 kfree(imxdma);
1099 return ret; 1111 return ret;
1100} 1112}
@@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev)
1114 free_irq(MX1_DMA_ERR, NULL); 1126 free_irq(MX1_DMA_ERR, NULL);
1115 } 1127 }
1116 1128
1117 kfree(imxdma); 1129 clk_disable_unprepare(imxdma->dma_ipg);
1130 clk_disable_unprepare(imxdma->dma_ahb);
1131 kfree(imxdma);
1118 1132
1119 return 0; 1133 return 0;
1120} 1134}
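The imx-dma change above splits the single "dma" clock into separate "ipg" and "ahb" clocks obtained with devm_clk_get(), switches to clk_prepare_enable() as required by the common clock framework, and unwinds both clocks on every probe error path and in remove(). A condensed sketch of that lifecycle, assuming a hypothetical my_dma structure (the real driver also frees IRQs and other resources):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct my_dma {
	struct clk *ipg;
	struct clk *ahb;
};

static int my_probe(struct platform_device *pdev, struct my_dma *d)
{
	int ret;

	/* devm_clk_get(): references are dropped automatically on driver
	 * detach, so no clk_put() is needed in the error paths. */
	d->ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(d->ipg))
		return PTR_ERR(d->ipg);

	d->ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(d->ahb))
		return PTR_ERR(d->ahb);

	ret = clk_prepare_enable(d->ipg);
	if (ret)
		return ret;

	ret = clk_prepare_enable(d->ahb);
	if (ret)
		clk_disable_unprepare(d->ipg);	/* undo the first enable */
	return ret;
}

static void my_remove(struct my_dma *d)
{
	clk_disable_unprepare(d->ipg);
	clk_disable_unprepare(d->ahb);
}
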
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d52dbc6c54ab..24acd711e032 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1119static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) 1119static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1120{ 1120{
1121 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1121 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1122 struct tegra_dma *tdma = tdc->tdma;
1123 int ret;
1122 1124
1123 dma_cookie_init(&tdc->dma_chan); 1125 dma_cookie_init(&tdc->dma_chan);
1124 tdc->config_init = false; 1126 tdc->config_init = false;
1125 return 0; 1127 ret = clk_prepare_enable(tdma->dma_clk);
1128 if (ret < 0)
1129 dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
1130 return ret;
1126} 1131}
1127 1132
1128static void tegra_dma_free_chan_resources(struct dma_chan *dc) 1133static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1129{ 1134{
1130 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1135 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1136 struct tegra_dma *tdma = tdc->tdma;
1131 1137
1132 struct tegra_dma_desc *dma_desc; 1138 struct tegra_dma_desc *dma_desc;
1133 struct tegra_dma_sg_req *sg_req; 1139 struct tegra_dma_sg_req *sg_req;
@@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1163 list_del(&sg_req->node); 1169 list_del(&sg_req->node);
1164 kfree(sg_req); 1170 kfree(sg_req);
1165 } 1171 }
1172 clk_disable_unprepare(tdma->dma_clk);
1166} 1173}
1167 1174
1168/* Tegra20 specific DMA controller information */ 1175/* Tegra20 specific DMA controller information */
@@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
1255 } 1262 }
1256 } 1263 }
1257 1264
1265 /* Enable clock before accessing registers */
1266 ret = clk_prepare_enable(tdma->dma_clk);
1267 if (ret < 0) {
1268 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1269 goto err_pm_disable;
1270 }
1271
1258 /* Reset DMA controller */ 1272 /* Reset DMA controller */
1259 tegra_periph_reset_assert(tdma->dma_clk); 1273 tegra_periph_reset_assert(tdma->dma_clk);
1260 udelay(2); 1274 udelay(2);
@@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
1265 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); 1279 tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1266 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); 1280 tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1267 1281
1282 clk_disable_unprepare(tdma->dma_clk);
1283
1268 INIT_LIST_HEAD(&tdma->dma_dev.channels); 1284 INIT_LIST_HEAD(&tdma->dma_dev.channels);
1269 for (i = 0; i < cdata->nr_channels; i++) { 1285 for (i = 0; i < cdata->nr_channels; i++) {
1270 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1286 struct tegra_dma_channel *tdc = &tdma->channels[i];
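With the change above the APB DMA clock runs only while it is actually needed: probe enables it just for the register-reset window, and each channel enables it in alloc_chan_resources() and drops it again in free_chan_resources(). A minimal sketch of the balanced enable/disable pairing (helper names are illustrative, not the driver's):

#include <linux/clk.h>
#include <linux/device.h>

static int chan_alloc(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);	/* clock on while the channel is held */

	if (ret < 0)
		dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
	return ret;
}

static void chan_free(struct clk *clk)
{
	clk_disable_unprepare(clk);		/* balanced disable on release */
}
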
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754f52cf..202a99207b7d 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
339 resource_size_t start, len; 339 resource_size_t start, len;
340 struct lnw_gpio *lnw; 340 struct lnw_gpio *lnw;
341 u32 gpio_base; 341 u32 gpio_base;
342 int retval = 0; 342 int retval;
343 int ngpio = id->driver_data; 343 int ngpio = id->driver_data;
344 344
345 retval = pci_enable_device(pdev); 345 retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
357 base = ioremap_nocache(start, len); 357 base = ioremap_nocache(start, len);
358 if (!base) { 358 if (!base) {
359 dev_err(&pdev->dev, "error mapping bar1\n"); 359 dev_err(&pdev->dev, "error mapping bar1\n");
360 retval = -EFAULT;
360 goto err3; 361 goto err3;
361 } 362 }
362 gpio_base = *((u32 *)base + 1); 363 gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
381 382
382 lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, 383 lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
383 &lnw_gpio_irq_ops, lnw); 384 &lnw_gpio_irq_ops, lnw);
384 if (!lnw->domain) 385 if (!lnw->domain) {
386 retval = -ENOMEM;
385 goto err3; 387 goto err3;
388 }
386 389
387 lnw->reg_base = base; 390 lnw->reg_base = base;
388 lnw->chip.label = dev_name(&pdev->dev); 391 lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f44501..b38986285868 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset)
99 if (offset < 20) 99 if (offset < 20)
100 return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; 100 return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
101 101
102 return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; 102 return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
103} 103}
104 104
105static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 105static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 4db460b6ecf7..80f44bb64a87 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
465 goto out_iounmap; 465 goto out_iounmap;
466 466
467 port->bgc.gc.to_irq = mxc_gpio_to_irq; 467 port->bgc.gc.to_irq = mxc_gpio_to_irq;
468 port->bgc.gc.base = pdev->id * 32; 468 port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
469 port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); 469 pdev->id * 32;
470 port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
471 470
472 err = gpiochip_add(&port->bgc.gc); 471 err = gpiochip_add(&port->bgc.gc);
473 if (err) 472 if (err)
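When the mxc port is probed from the device tree, pdev->id is -1, so the GPIO base can no longer be derived from it; the hunk above falls back to the "gpio" devicetree alias index instead. A small illustration of that lookup, keeping the driver's 32-GPIOs-per-port assumption (helper name hypothetical, error handling for a missing alias omitted as in the patch):

#include <linux/of.h>

static int pick_gpio_base(int pdev_id, struct device_node *np)
{
	/* platform id when available, otherwise the gpio0/gpio1/... alias */
	int id = (pdev_id < 0) ? of_alias_get_id(np, "gpio") : pdev_id;

	return id * 32;
}
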
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63a6ece..9cac88a65f78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@ int pxa_last_gpio;
62 62
63#ifdef CONFIG_OF 63#ifdef CONFIG_OF
64static struct irq_domain *domain; 64static struct irq_domain *domain;
65static struct device_node *pxa_gpio_of_node;
65#endif 66#endif
66 67
67struct pxa_gpio_chip { 68struct pxa_gpio_chip {
@@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
277 (value ? GPSR_OFFSET : GPCR_OFFSET)); 278 (value ? GPSR_OFFSET : GPCR_OFFSET));
278} 279}
279 280
281#ifdef CONFIG_OF_GPIO
282static int pxa_gpio_of_xlate(struct gpio_chip *gc,
283 const struct of_phandle_args *gpiospec,
284 u32 *flags)
285{
286 if (gpiospec->args[0] > pxa_last_gpio)
287 return -EINVAL;
288
289 if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
290 return -EINVAL;
291
292 if (flags)
293 *flags = gpiospec->args[1];
294
295 return gpiospec->args[0] % 32;
296}
297#endif
298
280static int __devinit pxa_init_gpio_chip(int gpio_end, 299static int __devinit pxa_init_gpio_chip(int gpio_end,
281 int (*set_wake)(unsigned int, unsigned int)) 300 int (*set_wake)(unsigned int, unsigned int))
282{ 301{
@@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end,
304 c->get = pxa_gpio_get; 323 c->get = pxa_gpio_get;
305 c->set = pxa_gpio_set; 324 c->set = pxa_gpio_set;
306 c->to_irq = pxa_gpio_to_irq; 325 c->to_irq = pxa_gpio_to_irq;
326#ifdef CONFIG_OF_GPIO
327 c->of_node = pxa_gpio_of_node;
328 c->of_xlate = pxa_gpio_of_xlate;
329 c->of_gpio_n_cells = 2;
330#endif
307 331
308 /* number of GPIOs on last bank may be less than 32 */ 332 /* number of GPIOs on last bank may be less than 32 */
309 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32; 333 c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@ static int pxa_gpio_nums(void)
488 return count; 512 return count;
489} 513}
490 514
515#ifdef CONFIG_OF
491static struct of_device_id pxa_gpio_dt_ids[] = { 516static struct of_device_id pxa_gpio_dt_ids[] = {
492 { .compatible = "mrvl,pxa-gpio" }, 517 { .compatible = "mrvl,pxa-gpio" },
493 { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, 518 { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
505 530
506const struct irq_domain_ops pxa_irq_domain_ops = { 531const struct irq_domain_ops pxa_irq_domain_ops = {
507 .map = pxa_irq_domain_map, 532 .map = pxa_irq_domain_map,
533 .xlate = irq_domain_xlate_twocell,
508}; 534};
509 535
510#ifdef CONFIG_OF
511static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) 536static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
512{ 537{
513 int ret, nr_banks, nr_gpios, irq_base; 538 int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
545 } 570 }
546 domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, 571 domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
547 &pxa_irq_domain_ops, NULL); 572 &pxa_irq_domain_ops, NULL);
573 pxa_gpio_of_node = np;
548 return 0; 574 return 0;
549err: 575err:
550 iounmap(gpio_reg_base); 576 iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = {
653 .probe = pxa_gpio_probe, 679 .probe = pxa_gpio_probe,
654 .driver = { 680 .driver = {
655 .name = "pxa-gpio", 681 .name = "pxa-gpio",
656 .of_match_table = pxa_gpio_dt_ids, 682 .of_match_table = of_match_ptr(pxa_gpio_dt_ids),
657 }, 683 },
658}; 684};
659 685
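The new pxa_gpio_of_xlate() above translates a two-cell GPIO specifier (global GPIO number, flags) into an offset on the matching 32-GPIO bank, rejecting numbers beyond pxa_last_gpio or specifiers aimed at a different bank. A reduced sketch of such a translator, keeping only the cell handling (the per-bank validation from the patch is omitted; names are illustrative):

#include <linux/of.h>
#include <linux/gpio.h>

#define BANK_SIZE 32	/* each PXA bank exposes 32 GPIOs */

static int example_of_xlate(struct gpio_chip *gc,
			    const struct of_phandle_args *gpiospec,
			    u32 *flags)
{
	/* cell 0: global GPIO number, cell 1: flags */
	if (gpiospec->args_count < 2)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0] % BANK_SIZE;	/* offset within this bank */
}
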
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 92f7b2bb79d4..ba126cc04073 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2454 }, 2454 },
2455 }, { 2455 }, {
2456 .chip = { 2456 .chip = {
2457 .base = EXYNOS5_GPC4(0),
2458 .ngpio = EXYNOS5_GPIO_C4_NR,
2459 .label = "GPC4",
2460 },
2461 }, {
2462 .chip = {
2463 .base = EXYNOS5_GPD0(0), 2457 .base = EXYNOS5_GPD0(0),
2464 .ngpio = EXYNOS5_GPIO_D0_NR, 2458 .ngpio = EXYNOS5_GPIO_D0_NR,
2465 .label = "GPD0", 2459 .label = "GPD0",
@@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2513 .label = "GPY6", 2507 .label = "GPY6",
2514 }, 2508 },
2515 }, { 2509 }, {
2510 .chip = {
2511 .base = EXYNOS5_GPC4(0),
2512 .ngpio = EXYNOS5_GPIO_C4_NR,
2513 .label = "GPC4",
2514 },
2515 }, {
2516 .config = &samsung_gpio_cfgs[9], 2516 .config = &samsung_gpio_cfgs[9],
2517 .irq_base = IRQ_EINT(0), 2517 .irq_base = IRQ_EINT(0),
2518 .chip = { 2518 .chip = {
@@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void)
2836 } 2836 }
2837 2837
2838 /* need to set base address for gpc4 */ 2838 /* need to set base address for gpc4 */
2839 exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; 2839 exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
2840 2840
2841 /* need to set base address for gpx */ 2841 /* need to set base address for gpx */
2842 chip = &exynos5_gpios_1[21]; 2842 chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8e3f30..8707d4572a06 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
241 break; 241 break;
242 242
243 default: 243 default:
244 return -ENODEV; 244 err = -ENODEV;
245 goto err_sch_gpio_core;
245 } 246 }
246 247
247 sch_gpio_core.dev = &pdev->dev; 248 sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28ad5a2..0303935d10e2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
119{ 119{
120 const struct firmware *fw; 120 const struct firmware *fw;
121 struct platform_device *pdev; 121 struct platform_device *pdev;
122 u8 *fwdata = NULL, *edid; 122 u8 *fwdata = NULL, *edid, *new_edid;
123 int fwsize, expected; 123 int fwsize, expected;
124 int builtin = 0, err = 0; 124 int builtin = 0, err = 0;
125 int i, valid_extensions = 0; 125 int i, valid_extensions = 0;
@@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
195 "\"%s\" for connector \"%s\"\n", valid_extensions, 195 "\"%s\" for connector \"%s\"\n", valid_extensions,
196 edid[0x7e], name, connector_name); 196 edid[0x7e], name, connector_name);
197 edid[0x7e] = valid_extensions; 197 edid[0x7e] = valid_extensions;
198 edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, 198 new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
199 GFP_KERNEL); 199 GFP_KERNEL);
200 if (edid == NULL) { 200 if (new_edid == NULL) {
201 err = -ENOMEM; 201 err = -ENOMEM;
202 kfree(edid);
202 goto relfw_out; 203 goto relfw_out;
203 } 204 }
205 edid = new_edid;
204 } 206 }
205 207
206 connector->display_info.raw_edid = edid; 208 connector->display_info.raw_edid = edid;
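The point of the edid_load fix above is that krealloc() returns NULL on failure but leaves the original allocation intact, so assigning the result straight back to edid dropped the only pointer to the old buffer. The fixed idiom in isolation (names hypothetical):

#include <linux/slab.h>

/* Resize a buffer without leaking it when krealloc() fails. */
static void *resize_or_free(void *buf, size_t new_size)
{
	void *tmp = krealloc(buf, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(buf);	/* the old buffer is still valid here */
		return NULL;
	}
	return tmp;		/* may or may not equal buf */
}
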
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ed22612bc847..a24ffbe97c01 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */
346 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 346 INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
347 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ 347 INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
348 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ 348 INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
349 INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
349 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ 350 INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
350 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ 351 INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
352 INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
351 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ 353 INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
352 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ 354 INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
353 INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ 355 INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
356 INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
357 INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
358 INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
359 INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
360 INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
361 INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
362 INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
363 INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
364 INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
365 INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
366 INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
367 INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
368 INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
369 INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
370 INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
371 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
372 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
373 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
374 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
375 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
376 INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
377 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
378 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
379 INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
380 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
381 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
382 INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
354 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 383 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
355 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 384 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
356 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 385 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index da8b01fb1bf8..a9d58d72bb4d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring,
451 struct drm_i915_file_private *file_priv = NULL; 451 struct drm_i915_file_private *file_priv = NULL;
452 struct i915_hw_context *to; 452 struct i915_hw_context *to;
453 struct drm_i915_gem_object *from_obj = ring->last_context_obj; 453 struct drm_i915_gem_object *from_obj = ring->last_context_obj;
454 int ret;
455 454
456 if (dev_priv->hw_contexts_disabled) 455 if (dev_priv->hw_contexts_disabled)
457 return 0; 456 return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5af631e788c8..ff2819ea0813 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
291 target_i915_obj = to_intel_bo(target_obj); 291 target_i915_obj = to_intel_bo(target_obj);
292 target_offset = target_i915_obj->gtt_offset; 292 target_offset = target_i915_obj->gtt_offset;
293 293
294 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
295 * pipe_control writes because the gpu doesn't properly redirect them
296 * through the ppgtt for non_secure batchbuffers. */
297 if (unlikely(IS_GEN6(dev) &&
298 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
299 !target_i915_obj->has_global_gtt_mapping)) {
300 i915_gem_gtt_bind_object(target_i915_obj,
301 target_i915_obj->cache_level);
302 }
303
294 /* The target buffer should have appeared before us in the 304 /* The target buffer should have appeared before us in the
295 * exec_object list, so it should have a GTT space bound by now. 305 * exec_object list, so it should have a GTT space bound by now.
296 */ 306 */
@@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
399 io_mapping_unmap_atomic(reloc_page); 409 io_mapping_unmap_atomic(reloc_page);
400 } 410 }
401 411
402 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
403 * pipe_control writes because the gpu doesn't properly redirect them
404 * through the ppgtt for non_secure batchbuffers. */
405 if (unlikely(IS_GEN6(dev) &&
406 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
407 !target_i915_obj->has_global_gtt_mapping)) {
408 i915_gem_gtt_bind_object(target_i915_obj,
409 target_i915_obj->cache_level);
410 }
411
412 /* and update the user's relocation entry */ 412 /* and update the user's relocation entry */
413 reloc->presumed_offset = target_offset; 413 reloc->presumed_offset = target_offset;
414 414
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a435536..ee9b68f6bc36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
361 struct drm_device *dev = obj->base.dev; 361 struct drm_device *dev = obj->base.dev;
362 struct drm_i915_private *dev_priv = dev->dev_private; 362 struct drm_i915_private *dev_priv = dev->dev_private;
363 363
364 if (dev_priv->mm.gtt->needs_dmar) 364 /* don't map imported dma buf objects */
365 if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
365 return intel_gtt_map_memory(obj->pages, 366 return intel_gtt_map_memory(obj->pages,
366 obj->base.size >> PAGE_SHIFT, 367 obj->base.size >> PAGE_SHIFT,
367 &obj->sg_list, 368 &obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2f5388af8df9..7631807a2788 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drv.h" 33#include "i915_drv.h"
34 34
35#ifdef CONFIG_PM
35static u32 calc_residency(struct drm_device *dev, const u32 reg) 36static u32 calc_residency(struct drm_device *dev, const u32 reg)
36{ 37{
37 struct drm_i915_private *dev_priv = dev->dev_private; 38 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev)
224 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 225 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
225 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 226 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
226} 227}
228#else
229void i915_setup_sysfs(struct drm_device *dev)
230{
231 return;
232}
233
234void i915_teardown_sysfs(struct drm_device *dev)
235{
236 return;
237}
238#endif /* CONFIG_PM */
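calc_residency() reads RC6 residency counters that are only meaningful with power management compiled in, so the hunk above builds the sysfs group under CONFIG_PM and provides empty i915_setup_sysfs()/i915_teardown_sysfs() otherwise. An equivalent idiom often used instead keeps the stubs as static inlines in the header; this is only a sketch of that alternative, not what the patch does:

struct drm_device;

#ifdef CONFIG_PM
void i915_setup_sysfs(struct drm_device *dev);
void i915_teardown_sysfs(struct drm_device *dev);
#else
static inline void i915_setup_sysfs(struct drm_device *dev) { }
static inline void i915_teardown_sysfs(struct drm_device *dev) { }
#endif
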
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f6159765f1eb..a69a3d0d3acf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
869 unsigned long bestppm, ppm, absppm; 869 unsigned long bestppm, ppm, absppm;
870 int dotclk, flag; 870 int dotclk, flag;
871 871
872 flag = 0;
872 dotclk = target * 1000; 873 dotclk = target * 1000;
873 bestppm = 1000000; 874 bestppm = 1000000;
874 ppm = absppm = 0; 875 ppm = absppm = 0;
@@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3753 continue; 3754 continue;
3754 } 3755 }
3755 3756
3756 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
3757 /* Use VBT settings if we have an eDP panel */
3758 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
3759
3760 if (edp_bpc < display_bpc) {
3761 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
3762 display_bpc = edp_bpc;
3763 }
3764 continue;
3765 }
3766
3767 /* Not one of the known troublemakers, check the EDID */ 3757 /* Not one of the known troublemakers, check the EDID */
3768 list_for_each_entry(connector, &dev->mode_config.connector_list, 3758 list_for_each_entry(connector, &dev->mode_config.connector_list,
3769 head) { 3759 head) {
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0a56b9ab0f58..a6c426afaa7a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1174 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1174 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1175 1175
1176 pp = ironlake_get_pp_control(dev_priv); 1176 pp = ironlake_get_pp_control(dev_priv);
1177 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1177 /* We need to switch off panel power _and_ force vdd, for otherwise some
1178 * panels get very unhappy and cease to work. */
1179 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
1178 I915_WRITE(PCH_PP_CONTROL, pp); 1180 I915_WRITE(PCH_PP_CONTROL, pp);
1179 POSTING_READ(PCH_PP_CONTROL); 1181 POSTING_READ(PCH_PP_CONTROL);
1180 1182
1183 intel_dp->want_panel_vdd = false;
1184
1181 ironlake_wait_panel_off(intel_dp); 1185 ironlake_wait_panel_off(intel_dp);
1182} 1186}
1183 1187
@@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
1287 * ensure that we have vdd while we switch off the panel. */ 1291 * ensure that we have vdd while we switch off the panel. */
1288 ironlake_edp_panel_vdd_on(intel_dp); 1292 ironlake_edp_panel_vdd_on(intel_dp);
1289 ironlake_edp_backlight_off(intel_dp); 1293 ironlake_edp_backlight_off(intel_dp);
1290 ironlake_edp_panel_off(intel_dp);
1291
1292 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1294 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1295 ironlake_edp_panel_off(intel_dp);
1293 intel_dp_link_down(intel_dp); 1296 intel_dp_link_down(intel_dp);
1294 ironlake_edp_panel_vdd_off(intel_dp, false);
1295} 1297}
1296 1298
1297static void intel_dp_commit(struct drm_encoder *encoder) 1299static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1326 /* Switching the panel off requires vdd. */ 1328 /* Switching the panel off requires vdd. */
1327 ironlake_edp_panel_vdd_on(intel_dp); 1329 ironlake_edp_panel_vdd_on(intel_dp);
1328 ironlake_edp_backlight_off(intel_dp); 1330 ironlake_edp_backlight_off(intel_dp);
1329 ironlake_edp_panel_off(intel_dp);
1330
1331 intel_dp_sink_dpms(intel_dp, mode); 1331 intel_dp_sink_dpms(intel_dp, mode);
1332 ironlake_edp_panel_off(intel_dp);
1332 intel_dp_link_down(intel_dp); 1333 intel_dp_link_down(intel_dp);
1333 ironlake_edp_panel_vdd_off(intel_dp, false);
1334 1334
1335 if (is_cpu_edp(intel_dp)) 1335 if (is_cpu_edp(intel_dp))
1336 ironlake_edp_pll_off(encoder); 1336 ironlake_edp_pll_off(encoder);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 84353559441c..132ab511b90c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -46,15 +46,16 @@
46}) 46})
47 47
48#define wait_for_atomic_us(COND, US) ({ \ 48#define wait_for_atomic_us(COND, US) ({ \
49 int i, ret__ = -ETIMEDOUT; \ 49 unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
50 for (i = 0; i < (US); i++) { \ 50 int ret__ = 0; \
51 if ((COND)) { \ 51 while (!(COND)) { \
52 ret__ = 0; \ 52 if (time_after(jiffies, timeout__)) { \
53 break; \ 53 ret__ = -ETIMEDOUT; \
54 } \ 54 break; \
55 udelay(1); \ 55 } \
56 } \ 56 cpu_relax(); \
57 ret__; \ 57 } \
58 ret__; \
58}) 59})
59 60
60#define wait_for(COND, MS) _wait_for(COND, MS, 1) 61#define wait_for(COND, MS) _wait_for(COND, MS, 1)
@@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
380 const struct drm_display_mode *mode, 381 const struct drm_display_mode *mode,
381 struct drm_display_mode *adjusted_mode); 382 struct drm_display_mode *adjusted_mode);
382extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 383extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
383extern u32 intel_panel_get_backlight(struct drm_device *dev);
384extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 384extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
385extern int intel_panel_setup_backlight(struct drm_device *dev); 385extern int intel_panel_setup_backlight(struct drm_device *dev);
386extern void intel_panel_enable_backlight(struct drm_device *dev, 386extern void intel_panel_enable_backlight(struct drm_device *dev,
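The rewritten wait_for_atomic_us above no longer assumes one loop iteration per microsecond; it computes a jiffies deadline and spins with cpu_relax() until the condition holds or the deadline passes. The same structure expressed as a plain helper function rather than a macro (a sketch with hypothetical names; like the macro, it does not re-check the condition after the deadline expires):

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/processor.h>

/* Busy-wait for cond(arg) to become true, for at most 'us' microseconds.
 * Returns 0 on success, -ETIMEDOUT otherwise. */
static int poll_until(bool (*cond)(void *), void *arg, unsigned int us)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(us);

	while (!cond(arg)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}
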
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1991a4408cf9..b9755f6378d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev)
486 bus->dev_priv = dev_priv; 486 bus->dev_priv = dev_priv;
487 487
488 bus->adapter.algo = &gmbus_algorithm; 488 bus->adapter.algo = &gmbus_algorithm;
489 ret = i2c_add_adapter(&bus->adapter);
490 if (ret)
491 goto err;
492 489
493 /* By default use a conservative clock rate */ 490 /* By default use a conservative clock rate */
494 bus->reg0 = port | GMBUS_RATE_100KHZ; 491 bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev)
498 bus->force_bit = true; 495 bus->force_bit = true;
499 496
500 intel_gpio_setup(bus, port); 497 intel_gpio_setup(bus, port);
498
499 ret = i2c_add_adapter(&bus->adapter);
500 if (ret)
501 goto err;
501 } 502 }
502 503
503 intel_i2c_reset(dev_priv->dev); 504 intel_i2c_reset(dev_priv->dev);
@@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
540 struct drm_i915_private *dev_priv = dev->dev_private; 541 struct drm_i915_private *dev_priv = dev->dev_private;
541 int i; 542 int i;
542 543
543 if (dev_priv->gmbus == NULL)
544 return;
545
546 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 544 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
547 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 545 struct intel_gmbus *bus = &dev_priv->gmbus[i];
548 i2c_del_adapter(&bus->adapter); 546 i2c_del_adapter(&bus->adapter);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 10c7d39034e1..3df4f5fa892a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
213 return val; 213 return val;
214} 214}
215 215
216u32 intel_panel_get_backlight(struct drm_device *dev) 216static u32 intel_panel_get_backlight(struct drm_device *dev)
217{ 217{
218 struct drm_i915_private *dev_priv = dev->dev_private; 218 struct drm_i915_private *dev_priv = dev->dev_private;
219 u32 val; 219 u32 val;
@@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
311 if (dev_priv->backlight_level == 0) 311 if (dev_priv->backlight_level == 0)
312 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 312 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
313 313
314 dev_priv->backlight_enabled = true;
315 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
316
317 if (INTEL_INFO(dev)->gen >= 4) { 314 if (INTEL_INFO(dev)->gen >= 4) {
318 uint32_t reg, tmp; 315 uint32_t reg, tmp;
319 316
@@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev,
326 * we don't track the backlight dpms state, hence check whether 323 * we don't track the backlight dpms state, hence check whether
327 * we have to do anything first. */ 324 * we have to do anything first. */
328 if (tmp & BLM_PWM_ENABLE) 325 if (tmp & BLM_PWM_ENABLE)
329 return; 326 goto set_level;
330 327
331 if (dev_priv->num_pipe == 3) 328 if (dev_priv->num_pipe == 3)
332 tmp &= ~BLM_PIPE_SELECT_IVB; 329 tmp &= ~BLM_PIPE_SELECT_IVB;
@@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev,
347 I915_WRITE(BLC_PWM_PCH_CTL1, tmp); 344 I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
348 } 345 }
349 } 346 }
347
348set_level:
349 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
350 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
351 * registers are set.
352 */
353 dev_priv->backlight_enabled = true;
354 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
350} 355}
351 356
352static void intel_panel_init_backlight(struct drm_device *dev) 357static void intel_panel_init_backlight(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 94aabcaa3a67..58c07cdafb7e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
3963 DRM_ERROR("Force wake wait timed out\n"); 3963 DRM_ERROR("Force wake wait timed out\n");
3964 3964
3965 I915_WRITE_NOTRACE(FORCEWAKE, 1); 3965 I915_WRITE_NOTRACE(FORCEWAKE, 1);
3966 POSTING_READ(FORCEWAKE);
3966 3967
3967 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 3968 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
3968 DRM_ERROR("Force wake wait timed out\n"); 3969 DRM_ERROR("Force wake wait timed out\n");
@@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
3983 DRM_ERROR("Force wake wait timed out\n"); 3984 DRM_ERROR("Force wake wait timed out\n");
3984 3985
3985 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); 3986 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
3987 POSTING_READ(FORCEWAKE_MT);
3986 3988
3987 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 3989 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
3988 DRM_ERROR("Force wake wait timed out\n"); 3990 DRM_ERROR("Force wake wait timed out\n");
@@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4018static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4020static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4019{ 4021{
4020 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4022 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4021 /* The below doubles as a POSTING_READ */ 4023 POSTING_READ(FORCEWAKE);
4022 gen6_gt_check_fifodbg(dev_priv); 4024 gen6_gt_check_fifodbg(dev_priv);
4023} 4025}
4024 4026
4025static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4027static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4026{ 4028{
4027 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); 4029 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
4028 /* The below doubles as a POSTING_READ */ 4030 POSTING_READ(FORCEWAKE_MT);
4029 gen6_gt_check_fifodbg(dev_priv); 4031 gen6_gt_check_fifodbg(dev_priv);
4030} 4032}
4031 4033
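The FORCEWAKE changes above replace the "doubles as a POSTING_READ" trick with an explicit read-back: MMIO writes are posted, so a read from the same device is needed to guarantee the write has reached the hardware before the driver starts polling the ack register. The underlying idiom, with illustrative register offsets rather than real i915 ones:

#include <linux/io.h>

#define REG_TRIGGER 0x1000	/* hypothetical offsets for illustration */

static void kick_and_flush(void __iomem *mmio)
{
	writel(1, mmio + REG_TRIGGER);
	/* Read back from the device to flush the posted write before any
	 * subsequent polling or delay that depends on it having landed. */
	(void)readl(mmio + REG_TRIGGER);
}
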
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d53..e2a73b38abe9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
227 * number of bits based on the write domains has little performance 227 * number of bits based on the write domains has little performance
228 * impact. 228 * impact.
229 */ 229 */
230 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 230 if (flush_domains) {
231 flags |= PIPE_CONTROL_TLB_INVALIDATE; 231 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
232 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 232 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
233 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 233 /*
234 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 234 * Ensure that any following seqno writes only happen
235 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 235 * when the render cache is indeed flushed.
236 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 236 */
237 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
238 /*
239 * Ensure that any following seqno writes only happen when the render
240 * cache is indeed flushed (but only if the caller actually wants that).
241 */
242 if (flush_domains)
243 flags |= PIPE_CONTROL_CS_STALL; 237 flags |= PIPE_CONTROL_CS_STALL;
238 }
239 if (invalidate_domains) {
240 flags |= PIPE_CONTROL_TLB_INVALIDATE;
241 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
244 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
245 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
246 /*
247 * TLB invalidate requires a post-sync write.
248 */
249 flags |= PIPE_CONTROL_QW_WRITE;
250 }
244 251
245 ret = intel_ring_begin(ring, 6); 252 ret = intel_ring_begin(ring, 4);
246 if (ret) 253 if (ret)
247 return ret; 254 return ret;
248 255
249 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 256 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
250 intel_ring_emit(ring, flags); 257 intel_ring_emit(ring, flags);
251 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 258 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
252 intel_ring_emit(ring, 0); /* lower dword */ 259 intel_ring_emit(ring, 0);
253 intel_ring_emit(ring, 0); /* uppwer dword */
254 intel_ring_emit(ring, MI_NOOP);
255 intel_ring_advance(ring); 260 intel_ring_advance(ring);
256 261
257 return 0; 262 return 0;
@@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
289 I915_WRITE_HEAD(ring, 0); 294 I915_WRITE_HEAD(ring, 0);
290 ring->write_tail(ring, 0); 295 ring->write_tail(ring, 0);
291 296
292 /* Initialize the ring. */
293 I915_WRITE_START(ring, obj->gtt_offset);
294 head = I915_READ_HEAD(ring) & HEAD_ADDR; 297 head = I915_READ_HEAD(ring) & HEAD_ADDR;
295 298
296 /* G45 ring initialization fails to reset head to zero */ 299 /* G45 ring initialization fails to reset head to zero */
@@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
316 } 319 }
317 } 320 }
318 321
322 /* Initialize the ring. This must happen _after_ we've cleared the ring
323 * registers with the above sequence (the readback of the HEAD registers
324 * also enforces ordering), otherwise the hw might lose the new ring
325 * register values. */
326 I915_WRITE_START(ring, obj->gtt_offset);
319 I915_WRITE_CTL(ring, 327 I915_WRITE_CTL(ring,
320 ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 328 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
321 | RING_VALID); 329 | RING_VALID);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 26a6a4d0d078..d172e9873131 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
444 struct i2c_msg *msgs; 444 struct i2c_msg *msgs;
445 int i, ret = true; 445 int i, ret = true;
446 446
447 /* Would be simpler to allocate both in one go ? */
447 buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); 448 buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
448 if (!buf) 449 if (!buf)
449 return false; 450 return false;
450 451
451 msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); 452 msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
452 if (!msgs) 453 if (!msgs) {
454 kfree(buf);
453 return false; 455 return false;
456 }
454 457
455 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); 458 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
456 459
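The sdvo fix above is the classic two-allocation cleanup: if the second allocation fails, the first must be released before returning, otherwise buf leaks on every failed command write. A sketch of the pattern (function name and sizes are illustrative; as the added comment notes, a single combined allocation would avoid the problem entirely):

#include <linux/slab.h>
#include <linux/i2c.h>

static int alloc_pair(u8 **buf, struct i2c_msg **msgs, size_t blen, int nmsg)
{
	*buf = kzalloc(blen, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	*msgs = kcalloc(nmsg, sizeof(**msgs), GFP_KERNEL);
	if (!*msgs) {
		kfree(*buf);	/* undo the first allocation on failure */
		*buf = NULL;
		return -ENOMEM;
	}
	return 0;
}
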
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a4d7c500c97b..b69642d5d850 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
468{ 468{
469 unsigned int vcomax, vcomin, pllreffreq; 469 unsigned int vcomax, vcomin, pllreffreq;
470 unsigned int delta, tmpdelta; 470 unsigned int delta, tmpdelta;
471 unsigned int testr, testn, testm, testo; 471 int testr, testn, testm, testo;
472 unsigned int p, m, n; 472 unsigned int p, m, n;
473 unsigned int computed; 473 unsigned int computed, vco;
474 int tmp; 474 int tmp;
475 const unsigned int m_div_val[] = { 1, 2, 4, 8 };
475 476
476 m = n = p = 0; 477 m = n = p = 0;
477 vcomax = 1488000; 478 vcomax = 1488000;
@@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
490 if (delta == 0) 491 if (delta == 0)
491 break; 492 break;
492 for (testo = 5; testo < 33; testo++) { 493 for (testo = 5; testo < 33; testo++) {
493 computed = pllreffreq * (testn + 1) / 494 vco = pllreffreq * (testn + 1) /
494 (testr + 1); 495 (testr + 1);
495 if (computed < vcomin) 496 if (vco < vcomin)
496 continue; 497 continue;
497 if (computed > vcomax) 498 if (vco > vcomax)
498 continue; 499 continue;
500 computed = vco / (m_div_val[testm] * (testo + 1));
499 if (computed > clock) 501 if (computed > clock)
500 tmpdelta = computed - clock; 502 tmpdelta = computed - clock;
501 else 503 else
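The g200er PLL bug fixed above was that the frequency compared against the requested clock was the raw VCO, ignoring the m divider and the output divider (testo + 1), so the search converged on the wrong divider set. The corrected candidate computation, condensed into a helper with the patch's variables:

/* Candidate output clock for one (r, n, m, o) combination.
 * m_div_val maps the 2-bit m field to its real divide ratio. */
static const unsigned int m_div_val[] = { 1, 2, 4, 8 };

static unsigned int g200er_candidate(unsigned int pllreffreq,
				     int testr, int testn, int testm, int testo)
{
	unsigned int vco = pllreffreq * (testn + 1) / (testr + 1);

	return vco / (m_div_val[testm] * (testo + 1));
}
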
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 77e564667b5c..240cf962c999 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev)
229 } 229 }
230 break; 230 break;
231 case 6: /* NV50- DP AUX */ 231 case 6: /* NV50- DP AUX */
232 port->drive = entry[0]; 232 port->drive = entry[0] & 0x0f;
233 port->sense = port->drive; 233 port->sense = port->drive;
234 port->adapter.algo = &nouveau_dp_i2c_algo; 234 port->adapter.algo = &nouveau_dp_i2c_algo;
235 break; 235 break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 1cdfd6e757ce..1866dbb49979 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -731,7 +731,6 @@ nouveau_card_init(struct drm_device *dev)
731 case 0xa3: 731 case 0xa3:
732 case 0xa5: 732 case 0xa5:
733 case 0xa8: 733 case 0xa8:
734 case 0xaf:
735 nva3_copy_create(dev); 734 nva3_copy_create(dev);
736 break; 735 break;
737 } 736 }
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
index cc82d799fc3b..c564c5e4c30a 100644
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
117 struct drm_device *dev = chan->dev; 117 struct drm_device *dev = chan->dev;
118 struct drm_nouveau_private *dev_priv = dev->dev_private; 118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 unsigned long flags; 119 unsigned long flags;
120 u32 save;
120 121
121 /* remove channel from playlist, will context switch if active */ 122 /* remove channel from playlist, will context switch if active */
122 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 123 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
123 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); 124 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
124 nv50_fifo_playlist_update(dev); 125 nv50_fifo_playlist_update(dev);
125 126
127 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
128
126 /* tell any engines on this channel to unload their contexts */ 129 /* tell any engines on this channel to unload their contexts */
127 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); 130 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
128 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) 131 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
129 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); 132 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
130 133
134 nv_wr32(dev, 0x002520, save);
135
131 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); 136 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
132 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 137 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
133 138
@@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
184 struct drm_nouveau_private *dev_priv = dev->dev_private; 189 struct drm_nouveau_private *dev_priv = dev->dev_private;
185 struct nv84_fifo_priv *priv = nv_engine(dev, engine); 190 struct nv84_fifo_priv *priv = nv_engine(dev, engine);
186 int i; 191 int i;
192 u32 save;
187 193
188 /* set playlist length to zero, fifo will unload context */ 194 /* set playlist length to zero, fifo will unload context */
189 nv_wr32(dev, 0x0032ec, 0); 195 nv_wr32(dev, 0x0032ec, 0);
190 196
197 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
198
191 /* tell all connected engines to unload their contexts */ 199 /* tell all connected engines to unload their contexts */
192 for (i = 0; i < priv->base.channels; i++) { 200 for (i = 0; i < priv->base.channels; i++) {
193 struct nouveau_channel *chan = dev_priv->channels.ptr[i]; 201 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
@@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
199 } 207 }
200 } 208 }
201 209
210 nv_wr32(dev, 0x002520, save);
202 nv_wr32(dev, 0x002140, 0); 211 nv_wr32(dev, 0x002140, 0);
203 return 0; 212 return 0;
204} 213}
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 7c95c44e2887..4e712b10ebdb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
557 nouveau_mem_exec(&exec, info->perflvl); 557 nouveau_mem_exec(&exec, info->perflvl);
558 558
559 if (dev_priv->chipset < 0xd0) 559 if (dev_priv->chipset < 0xd0)
560 nv_wr32(dev, 0x611200, 0x00003300); 560 nv_wr32(dev, 0x611200, 0x00003330);
561 else 561 else
562 nv_wr32(dev, 0x62c000, 0x03030300); 562 nv_wr32(dev, 0x62c000, 0x03030300);
563} 563}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index d0d60e1e7f95..dac525b2994e 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
790 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 790 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
791 int ch = EVO_CURS(nv_crtc->index); 791 int ch = EVO_CURS(nv_crtc->index);
792 792
793 evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x); 793 evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
794 evo_piow(crtc->dev, ch, 0x0080, 0x00000000); 794 evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
795 return 0; 795 return 0;
796} 796}
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index 1855ecbd843b..e98d144e6eb9 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
294 printk(" on channel 0x%010llx\n", (u64)inst << 12); 294 printk(" on channel 0x%010llx\n", (u64)inst << 12);
295} 295}
296 296
297static int
298nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
299{
300 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
301 struct drm_nouveau_private *dev_priv = dev->dev_private;
302 struct nouveau_channel *chan = NULL;
303 unsigned long flags;
304 int ret = -EINVAL;
305
306 spin_lock_irqsave(&dev_priv->channels.lock, flags);
307 if (likely(chid >= 0 && chid < priv->base.channels)) {
308 chan = dev_priv->channels.ptr[chid];
309 if (likely(chan))
310 ret = nouveau_finish_page_flip(chan, NULL);
311 }
312 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
313 return ret;
314}
315
297static void 316static void
298nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) 317nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
299{ 318{
@@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
303 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; 322 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
304 u32 subc = (addr & 0x00070000); 323 u32 subc = (addr & 0x00070000);
305 u32 mthd = (addr & 0x00003ffc); 324 u32 mthd = (addr & 0x00003ffc);
325 u32 show = stat;
326
327 if (stat & 0x00200000) {
328 if (mthd == 0x0054) {
329 if (!nve0_fifo_page_flip(dev, chid))
330 show &= ~0x00200000;
331 }
332 }
306 333
307 NV_INFO(dev, "PSUBFIFO %d:", unit); 334 if (show) {
308 nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat); 335 NV_INFO(dev, "PFIFO%d:", unit);
309 NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", 336 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
310 unit, chid, subc, mthd, data); 337 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
338 unit, chid, subc, mthd, data);
339 }
311 340
312 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); 341 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
313 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); 342 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9e6f76fec527..c6fcb5b86a45 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
259 /* adjust pm to dpms changes BEFORE enabling crtcs */ 259 /* adjust pm to dpms changes BEFORE enabling crtcs */
260 radeon_pm_compute_clocks(rdev); 260 radeon_pm_compute_clocks(rdev);
261 /* disable crtc pair power gating before programming */ 261 /* disable crtc pair power gating before programming */
262 if (ASIC_IS_DCE6(rdev)) 262 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
263 atombios_powergate_crtc(crtc, ATOM_DISABLE); 263 atombios_powergate_crtc(crtc, ATOM_DISABLE);
264 atombios_enable_crtc(crtc, ATOM_ENABLE); 264 atombios_enable_crtc(crtc, ATOM_ENABLE);
265 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 265 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
279 atombios_enable_crtc(crtc, ATOM_DISABLE); 279 atombios_enable_crtc(crtc, ATOM_DISABLE);
280 radeon_crtc->enabled = false; 280 radeon_crtc->enabled = false;
281 /* power gating is per-pair */ 281 /* power gating is per-pair */
282 if (ASIC_IS_DCE6(rdev)) { 282 if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) {
283 struct drm_crtc *other_crtc; 283 struct drm_crtc *other_crtc;
284 struct radeon_crtc *other_radeon_crtc; 284 struct radeon_crtc *other_radeon_crtc;
285 list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { 285 list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
@@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1531 * crtc virtual pixel clock. 1531 * crtc virtual pixel clock.
1532 */ 1532 */
1533 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { 1533 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
1534 if (ASIC_IS_DCE5(rdev)) 1534 if (rdev->clock.dp_extclk)
1535 return ATOM_DCPLL; 1535 return ATOM_PPLL_INVALID;
1536 else if (ASIC_IS_DCE6(rdev)) 1536 else if (ASIC_IS_DCE6(rdev))
1537 return ATOM_PPLL0; 1537 return ATOM_PPLL0;
1538 else if (rdev->clock.dp_extclk) 1538 else if (ASIC_IS_DCE5(rdev))
1539 return ATOM_PPLL_INVALID; 1539 return ATOM_DCPLL;
1540 } 1540 }
1541 } 1541 }
1542 } 1542 }
@@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1635static void atombios_crtc_prepare(struct drm_crtc *crtc) 1635static void atombios_crtc_prepare(struct drm_crtc *crtc)
1636{ 1636{
1637 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1637 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1638 struct drm_device *dev = crtc->dev;
1639 struct radeon_device *rdev = dev->dev_private;
1638 1640
1641 radeon_crtc->in_mode_set = true;
1639 /* pick pll */ 1642 /* pick pll */
1640 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); 1643 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1641 1644
1645 /* disable crtc pair power gating before programming */
1646 if (ASIC_IS_DCE6(rdev))
1647 atombios_powergate_crtc(crtc, ATOM_DISABLE);
1648
1642 atombios_lock_crtc(crtc, ATOM_ENABLE); 1649 atombios_lock_crtc(crtc, ATOM_ENABLE);
1643 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1650 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1644} 1651}
1645 1652
1646static void atombios_crtc_commit(struct drm_crtc *crtc) 1653static void atombios_crtc_commit(struct drm_crtc *crtc)
1647{ 1654{
1655 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1656
1648 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1657 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
1649 atombios_lock_crtc(crtc, ATOM_DISABLE); 1658 atombios_lock_crtc(crtc, ATOM_DISABLE);
1659 radeon_crtc->in_mode_set = false;
1650} 1660}
1651 1661
1652static void atombios_crtc_disable(struct drm_crtc *crtc) 1662static void atombios_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e585a3b947eb..e93b80a6d4e9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
1229 1229
1230void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) 1230void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
1231{ 1231{
1232 save->vga_control[0] = RREG32(D1VGA_CONTROL);
1233 save->vga_control[1] = RREG32(D2VGA_CONTROL);
1234 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 1232 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
1235 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 1233 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
1236 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
1237 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
1238 if (rdev->num_crtc >= 4) {
1239 save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
1240 save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
1241 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
1242 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
1243 }
1244 if (rdev->num_crtc >= 6) {
1245 save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
1246 save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
1247 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
1248 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
1249 }
1250 1234
1251 /* Stop all video */ 1235 /* Stop all video */
1252 WREG32(VGA_RENDER_CONTROL, 0); 1236 WREG32(VGA_RENDER_CONTROL, 0);
@@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1357 /* Unlock host access */ 1341 /* Unlock host access */
1358 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); 1342 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
1359 mdelay(1); 1343 mdelay(1);
1360 /* Restore video state */
1361 WREG32(D1VGA_CONTROL, save->vga_control[0]);
1362 WREG32(D2VGA_CONTROL, save->vga_control[1]);
1363 if (rdev->num_crtc >= 4) {
1364 WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
1365 WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
1366 }
1367 if (rdev->num_crtc >= 6) {
1368 WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
1369 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
1370 }
1371 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
1372 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
1373 if (rdev->num_crtc >= 4) {
1374 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
1375 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1376 }
1377 if (rdev->num_crtc >= 6) {
1378 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1379 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1380 }
1381 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
1382 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
1383 if (rdev->num_crtc >= 4) {
1384 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
1385 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
1386 }
1387 if (rdev->num_crtc >= 6) {
1388 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
1389 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
1390 }
1391 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1392 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1393 if (rdev->num_crtc >= 4) {
1394 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1395 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1396 }
1397 if (rdev->num_crtc >= 6) {
1398 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1399 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1400 }
1401 WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 1344 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1402} 1345}
1403 1346
@@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1986 if (rdev->flags & RADEON_IS_IGP) 1929 if (rdev->flags & RADEON_IS_IGP)
1987 rdev->config.evergreen.tile_config |= 1 << 4; 1930 rdev->config.evergreen.tile_config |= 1 << 4;
1988 else { 1931 else {
1989 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 1932 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1990 rdev->config.evergreen.tile_config |= 1 << 4; 1933 case 0: /* four banks */
1991 else
1992 rdev->config.evergreen.tile_config |= 0 << 4; 1934 rdev->config.evergreen.tile_config |= 0 << 4;
1935 break;
1936 case 1: /* eight banks */
1937 rdev->config.evergreen.tile_config |= 1 << 4;
1938 break;
1939 case 2: /* sixteen banks */
1940 default:
1941 rdev->config.evergreen.tile_config |= 2 << 4;
1942 break;
1943 }
1993 } 1944 }
1994 rdev->config.evergreen.tile_config |= 0 << 8; 1945 rdev->config.evergreen.tile_config |= 0 << 8;
1995 rdev->config.evergreen.tile_config |= 1946 rdev->config.evergreen.tile_config |=
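
The hunk above replaces the old boolean test on the NOOFBANK field with a three-way switch: the field reads 0, 1, or 2 for four-, eight-, and sixteen-bank memory configurations, and that same value is stored in bits 7:4 of tile_config. With the old code, a sixteen-bank part (field value 2) fell into the "true" branch and was reported to userspace with the eight-bank encoding. The equivalent switch is applied to cayman (ni.c) and SI (si.c) further down. A minimal sketch of the mapping, not taken from the patch:

    /* Sketch only: NOOFBANK field value -> bank encoding in tile_config bits 7:4.
     * 0 = four banks, 1 = eight banks, 2 (or anything larger) = sixteen banks. */
    static unsigned int noofbank_to_tile_config(unsigned int noofbank)
    {
            return (noofbank >= 2 ? 2 : noofbank) << 4;
    }
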
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c16554122ccd..e44a62a07fe3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
788 case V_030000_SQ_TEX_DIM_1D_ARRAY: 788 case V_030000_SQ_TEX_DIM_1D_ARRAY:
789 case V_030000_SQ_TEX_DIM_2D_ARRAY: 789 case V_030000_SQ_TEX_DIM_2D_ARRAY:
790 depth = 1; 790 depth = 1;
791 break;
792 case V_030000_SQ_TEX_DIM_2D_MSAA:
793 case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
794 surf.nsamples = 1 << llevel;
795 llevel = 0;
796 depth = 1;
797 break;
791 case V_030000_SQ_TEX_DIM_3D: 798 case V_030000_SQ_TEX_DIM_3D:
792 break; 799 break;
793 default: 800 default:
@@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
961 968
962 if (track->db_dirty) { 969 if (track->db_dirty) {
963 /* Check stencil buffer */ 970 /* Check stencil buffer */
964 if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { 971 if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
972 G_028800_STENCIL_ENABLE(track->db_depth_control)) {
965 r = evergreen_cs_track_validate_stencil(p); 973 r = evergreen_cs_track_validate_stencil(p);
966 if (r) 974 if (r)
967 return r; 975 return r;
968 } 976 }
969 /* Check depth buffer */ 977 /* Check depth buffer */
970 if (G_028800_Z_ENABLE(track->db_depth_control)) { 978 if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
979 G_028800_Z_ENABLE(track->db_depth_control)) {
971 r = evergreen_cs_track_validate_depth(p); 980 r = evergreen_cs_track_validate_depth(p);
972 if (r) 981 if (r)
973 return r; 982 return r;
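
Two things change in evergreen_cs.c. First, for the MSAA texture dimensions the checker now treats the LAST_LEVEL field as log2 of the sample count rather than a mip level, deriving nsamples from it and clearing llevel. Second, depth/stencil validation is skipped when the corresponding DB format is programmed as INVALID, matching the "allow invalid DB formats" and "MSAA textures" interface bumps (2.18.0/2.19.0) recorded in radeon_drv.c below. A short restatement of the sample-count decode, with a hypothetical field value:

    /* Sketch: for SQ_TEX_DIM_2D_MSAA / 2D_ARRAY_MSAA, LAST_LEVEL carries
     * log2(samples), e.g. a value of 3 means an 8-sample surface. */
    surf.nsamples = 1 << llevel;    /* llevel == 3  ->  8 samples */
    llevel = 0;                     /* MSAA surfaces have no mip chain */
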
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index d3bd098e4e19..79347855d9bf 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1277,6 +1277,8 @@
1277#define S_028044_FORMAT(x) (((x) & 0x1) << 0) 1277#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
1278#define G_028044_FORMAT(x) (((x) >> 0) & 0x1) 1278#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
1279#define C_028044_FORMAT 0xFFFFFFFE 1279#define C_028044_FORMAT 0xFFFFFFFE
1280#define V_028044_STENCIL_INVALID 0
1281#define V_028044_STENCIL_8 1
1280#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) 1282#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
1281#define DB_Z_READ_BASE 0x28048 1283#define DB_Z_READ_BASE 0x28048
1282#define DB_STENCIL_READ_BASE 0x2804c 1284#define DB_STENCIL_READ_BASE 0x2804c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9945d86d9001..853800e8582f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
574 if (rdev->flags & RADEON_IS_IGP) 574 if (rdev->flags & RADEON_IS_IGP)
575 rdev->config.cayman.tile_config |= 1 << 4; 575 rdev->config.cayman.tile_config |= 1 << 4;
576 else { 576 else {
577 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 577 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
578 rdev->config.cayman.tile_config |= 1 << 4; 578 case 0: /* four banks */
579 else
580 rdev->config.cayman.tile_config |= 0 << 4; 579 rdev->config.cayman.tile_config |= 0 << 4;
580 break;
581 case 1: /* eight banks */
582 rdev->config.cayman.tile_config |= 1 << 4;
583 break;
584 case 2: /* sixteen banks */
585 default:
586 rdev->config.cayman.tile_config |= 2 << 4;
587 break;
588 }
581 } 589 }
582 rdev->config.cayman.tile_config |= 590 rdev->config.cayman.tile_config |=
583 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 591 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 637280f541a3..d79c639ae739 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3789 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 3789 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3790 } 3790 }
3791} 3791}
3792
3793/**
3794 * r600_get_gpu_clock - return GPU clock counter snapshot
3795 *
3796 * @rdev: radeon_device pointer
3797 *
3798 * Fetches a GPU clock counter snapshot (R6xx-cayman).
3799 * Returns the 64 bit clock counter snapshot.
3800 */
3801uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
3802{
3803 uint64_t clock;
3804
3805 mutex_lock(&rdev->gpu_clock_mutex);
3806 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3807 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
3808 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3809 mutex_unlock(&rdev->gpu_clock_mutex);
3810 return clock;
3811}
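
A note on the sequence above, restated as a parameterized sketch (not from the patch): reading the two 32-bit halves without a latch could tear across a carry, pairing an old LSB with a post-rollover MSB and jumping the timestamp by roughly 2^32 ticks. Writing RLC_CAPTURE_GPU_CLOCK_COUNT first snapshots both halves, as the kernel-doc implies, and rdev->gpu_clock_mutex keeps a concurrent caller from re-latching between the two reads. si_get_gpu_clock() later in this series uses the same sequence with the SI register offsets.

    /* Sketch of the latch-then-read pattern used by r600_get_gpu_clock()
     * and si_get_gpu_clock(); kernel driver context assumed. */
    static uint64_t read_latched_counter64(struct radeon_device *rdev,
                                           u32 capture_reg, u32 lsb_reg, u32 msb_reg)
    {
            uint64_t clock;

            mutex_lock(&rdev->gpu_clock_mutex);
            WREG32(capture_reg, 1);                 /* latch both halves at once */
            clock = (uint64_t)RREG32(lsb_reg) |
                    ((uint64_t)RREG32(msb_reg) << 32);
            mutex_unlock(&rdev->gpu_clock_mutex);
            return clock;
    }
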
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ca87f7afaf23..3dab49cb1d4a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
764 } 764 }
765 765
766 /* Check depth buffer */ 766 /* Check depth buffer */
767 if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || 767 if (track->db_dirty &&
768 G_028800_Z_ENABLE(track->db_depth_control))) { 768 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
769 (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
770 G_028800_Z_ENABLE(track->db_depth_control))) {
769 r = r600_cs_track_validate_db(p); 771 r = r600_cs_track_validate_db(p);
770 if (r) 772 if (r)
771 return r; 773 return r;
@@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1557 u32 tiling_flags) 1559 u32 tiling_flags)
1558{ 1560{
1559 struct r600_cs_track *track = p->track; 1561 struct r600_cs_track *track = p->track;
1560 u32 nfaces, llevel, blevel, w0, h0, d0; 1562 u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1561 u32 word0, word1, l0_size, mipmap_size, word2, word3; 1563 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1562 u32 height_align, pitch, pitch_align, depth_align; 1564 u32 height_align, pitch, pitch_align, depth_align;
1563 u32 array, barray, larray; 1565 u32 barray, larray;
1564 u64 base_align; 1566 u64 base_align;
1565 struct array_mode_checker array_check; 1567 struct array_mode_checker array_check;
1566 u32 format; 1568 u32 format;
1569 bool is_array;
1567 1570
1568 /* on legacy kernel we don't perform advanced check */ 1571 /* on legacy kernel we don't perform advanced check */
1569 if (p->rdev == NULL) 1572 if (p->rdev == NULL)
@@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1581 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1584 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1582 } 1585 }
1583 word1 = radeon_get_ib_value(p, idx + 1); 1586 word1 = radeon_get_ib_value(p, idx + 1);
1587 word2 = radeon_get_ib_value(p, idx + 2) << 8;
1588 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1589 word4 = radeon_get_ib_value(p, idx + 4);
1590 word5 = radeon_get_ib_value(p, idx + 5);
1591 dim = G_038000_DIM(word0);
1584 w0 = G_038000_TEX_WIDTH(word0) + 1; 1592 w0 = G_038000_TEX_WIDTH(word0) + 1;
1593 pitch = (G_038000_PITCH(word0) + 1) * 8;
1585 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1594 h0 = G_038004_TEX_HEIGHT(word1) + 1;
1586 d0 = G_038004_TEX_DEPTH(word1); 1595 d0 = G_038004_TEX_DEPTH(word1);
1596 format = G_038004_DATA_FORMAT(word1);
1597 blevel = G_038010_BASE_LEVEL(word4);
1598 llevel = G_038014_LAST_LEVEL(word5);
1599 /* pitch in texels */
1600 array_check.array_mode = G_038000_TILE_MODE(word0);
1601 array_check.group_size = track->group_size;
1602 array_check.nbanks = track->nbanks;
1603 array_check.npipes = track->npipes;
1604 array_check.nsamples = 1;
1605 array_check.blocksize = r600_fmt_get_blocksize(format);
1587 nfaces = 1; 1606 nfaces = 1;
1588 array = 0; 1607 is_array = false;
1589 switch (G_038000_DIM(word0)) { 1608 switch (dim) {
1590 case V_038000_SQ_TEX_DIM_1D: 1609 case V_038000_SQ_TEX_DIM_1D:
1591 case V_038000_SQ_TEX_DIM_2D: 1610 case V_038000_SQ_TEX_DIM_2D:
1592 case V_038000_SQ_TEX_DIM_3D: 1611 case V_038000_SQ_TEX_DIM_3D:
@@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1599 break; 1618 break;
1600 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1619 case V_038000_SQ_TEX_DIM_1D_ARRAY:
1601 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1620 case V_038000_SQ_TEX_DIM_2D_ARRAY:
1602 array = 1; 1621 is_array = true;
1603 break; 1622 break;
1604 case V_038000_SQ_TEX_DIM_2D_MSAA:
1605 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1623 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1624 is_array = true;
1625 /* fall through */
1626 case V_038000_SQ_TEX_DIM_2D_MSAA:
1627 array_check.nsamples = 1 << llevel;
1628 llevel = 0;
1629 break;
1606 default: 1630 default:
1607 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1631 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1608 return -EINVAL; 1632 return -EINVAL;
1609 } 1633 }
1610 format = G_038004_DATA_FORMAT(word1);
1611 if (!r600_fmt_is_valid_texture(format, p->family)) { 1634 if (!r600_fmt_is_valid_texture(format, p->family)) {
1612 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1635 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1613 __func__, __LINE__, format); 1636 __func__, __LINE__, format);
1614 return -EINVAL; 1637 return -EINVAL;
1615 } 1638 }
1616 1639
1617 /* pitch in texels */
1618 pitch = (G_038000_PITCH(word0) + 1) * 8;
1619 array_check.array_mode = G_038000_TILE_MODE(word0);
1620 array_check.group_size = track->group_size;
1621 array_check.nbanks = track->nbanks;
1622 array_check.npipes = track->npipes;
1623 array_check.nsamples = 1;
1624 array_check.blocksize = r600_fmt_get_blocksize(format);
1625 if (r600_get_array_mode_alignment(&array_check, 1640 if (r600_get_array_mode_alignment(&array_check,
1626 &pitch_align, &height_align, &depth_align, &base_align)) { 1641 &pitch_align, &height_align, &depth_align, &base_align)) {
1627 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1642 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1647 return -EINVAL; 1662 return -EINVAL;
1648 } 1663 }
1649 1664
1650 word2 = radeon_get_ib_value(p, idx + 2) << 8;
1651 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1652
1653 word0 = radeon_get_ib_value(p, idx + 4);
1654 word1 = radeon_get_ib_value(p, idx + 5);
1655 blevel = G_038010_BASE_LEVEL(word0);
1656 llevel = G_038014_LAST_LEVEL(word1);
1657 if (blevel > llevel) { 1665 if (blevel > llevel) {
1658 dev_warn(p->dev, "texture blevel %d > llevel %d\n", 1666 dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1659 blevel, llevel); 1667 blevel, llevel);
1660 } 1668 }
1661 if (array == 1) { 1669 if (is_array) {
1662 barray = G_038014_BASE_ARRAY(word1); 1670 barray = G_038014_BASE_ARRAY(word5);
1663 larray = G_038014_LAST_ARRAY(word1); 1671 larray = G_038014_LAST_ARRAY(word5);
1664 1672
1665 nfaces = larray - barray + 1; 1673 nfaces = larray - barray + 1;
1666 } 1674 }
@@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1677 return -EINVAL; 1685 return -EINVAL;
1678 } 1686 }
1679 /* using get ib will give us the offset into the mipmap bo */ 1687 /* using get ib will give us the offset into the mipmap bo */
1680 word3 = radeon_get_ib_value(p, idx + 3) << 8;
1681 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1688 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1682 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1689 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1683 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1690 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4b116ae75fc2..fd328f4c3ea8 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -602,6 +602,9 @@
602#define RLC_HB_WPTR 0x3f1c 602#define RLC_HB_WPTR 0x3f1c
603#define RLC_HB_WPTR_LSB_ADDR 0x3f14 603#define RLC_HB_WPTR_LSB_ADDR 0x3f14
604#define RLC_HB_WPTR_MSB_ADDR 0x3f18 604#define RLC_HB_WPTR_MSB_ADDR 0x3f18
605#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
606#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
607#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
605#define RLC_MC_CNTL 0x3f44 608#define RLC_MC_CNTL 0x3f44
606#define RLC_UCODE_CNTL 0x3f48 609#define RLC_UCODE_CNTL 0x3f48
607#define RLC_UCODE_ADDR 0x3f2c 610#define RLC_UCODE_ADDR 0x3f2c
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5431af292408..99304194a65c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -300,6 +300,7 @@ struct radeon_bo_va {
300 uint64_t soffset; 300 uint64_t soffset;
301 uint64_t eoffset; 301 uint64_t eoffset;
302 uint32_t flags; 302 uint32_t flags;
303 struct radeon_fence *fence;
303 bool valid; 304 bool valid;
304}; 305};
305 306
@@ -1533,6 +1534,7 @@ struct radeon_device {
1533 unsigned debugfs_count; 1534 unsigned debugfs_count;
1534 /* virtual memory */ 1535 /* virtual memory */
1535 struct radeon_vm_manager vm_manager; 1536 struct radeon_vm_manager vm_manager;
1537 struct mutex gpu_clock_mutex;
1536}; 1538};
1537 1539
1538int radeon_device_init(struct radeon_device *rdev, 1540int radeon_device_init(struct radeon_device *rdev,
@@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1733#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) 1735#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
1734#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) 1736#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
1735#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) 1737#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
1736#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) 1738#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
1737#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) 1739#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
1738#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) 1740#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
1739#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) 1741#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
1740#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) 1742#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
1741 1743
1742/* Common functions */ 1744/* Common functions */
1743/* AGP */ 1745/* AGP */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f4af24310438..18c38d14c8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
255 * rv515 255 * rv515
256 */ 256 */
257struct rv515_mc_save { 257struct rv515_mc_save {
258 u32 d1vga_control;
259 u32 d2vga_control;
260 u32 vga_render_control; 258 u32 vga_render_control;
261 u32 vga_hdp_control; 259 u32 vga_hdp_control;
262 u32 d1crtc_control;
263 u32 d2crtc_control;
264}; 260};
261
265int rv515_init(struct radeon_device *rdev); 262int rv515_init(struct radeon_device *rdev);
266void rv515_fini(struct radeon_device *rdev); 263void rv515_fini(struct radeon_device *rdev);
267uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 264uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
371 unsigned num_gpu_pages, 368 unsigned num_gpu_pages,
372 struct radeon_sa_bo *vb); 369 struct radeon_sa_bo *vb);
373int r600_mc_wait_for_idle(struct radeon_device *rdev); 370int r600_mc_wait_for_idle(struct radeon_device *rdev);
371uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
374 372
375/* 373/*
376 * rv770,rv730,rv710,rv740 374 * rv770,rv730,rv710,rv740
@@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev);
389 * evergreen 387 * evergreen
390 */ 388 */
391struct evergreen_mc_save { 389struct evergreen_mc_save {
392 u32 vga_control[6];
393 u32 vga_render_control; 390 u32 vga_render_control;
394 u32 vga_hdp_control; 391 u32 vga_hdp_control;
395 u32 crtc_control[6];
396}; 392};
393
397void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); 394void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
398int evergreen_init(struct radeon_device *rdev); 395int evergreen_init(struct radeon_device *rdev);
399void evergreen_fini(struct radeon_device *rdev); 396void evergreen_fini(struct radeon_device *rdev);
@@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
472void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); 469void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
473void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); 470void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
474int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 471int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
472uint64_t si_get_gpu_clock(struct radeon_device *rdev);
475 473
476#endif 474#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index b1e3820df363..f9c21f9d16bc 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1263union igp_info { 1263union igp_info {
1264 struct _ATOM_INTEGRATED_SYSTEM_INFO info; 1264 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
1265 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; 1265 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
1266 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
1267 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
1266}; 1268};
1267 1269
1268bool radeon_atombios_sideport_present(struct radeon_device *rdev) 1270bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
1390 struct radeon_mode_info *mode_info = &rdev->mode_info; 1392 struct radeon_mode_info *mode_info = &rdev->mode_info;
1391 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 1393 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1392 u16 data_offset, size; 1394 u16 data_offset, size;
1393 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; 1395 union igp_info *igp_info;
1394 u8 frev, crev; 1396 u8 frev, crev;
1395 u16 percentage = 0, rate = 0; 1397 u16 percentage = 0, rate = 0;
1396 1398
1397 /* get any igp specific overrides */ 1399 /* get any igp specific overrides */
1398 if (atom_parse_data_header(mode_info->atom_context, index, &size, 1400 if (atom_parse_data_header(mode_info->atom_context, index, &size,
1399 &frev, &crev, &data_offset)) { 1401 &frev, &crev, &data_offset)) {
1400 igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) 1402 igp_info = (union igp_info *)
1401 (mode_info->atom_context->bios + data_offset); 1403 (mode_info->atom_context->bios + data_offset);
1402 switch (id) { 1404 switch (crev) {
1403 case ASIC_INTERNAL_SS_ON_TMDS: 1405 case 6:
1404 percentage = le16_to_cpu(igp_info->usDVISSPercentage); 1406 switch (id) {
1405 rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); 1407 case ASIC_INTERNAL_SS_ON_TMDS:
1408 percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
1409 rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
1410 break;
1411 case ASIC_INTERNAL_SS_ON_HDMI:
1412 percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
1413 rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
1414 break;
1415 case ASIC_INTERNAL_SS_ON_LVDS:
1416 percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
1417 rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
1418 break;
1419 }
1406 break; 1420 break;
1407 case ASIC_INTERNAL_SS_ON_HDMI: 1421 case 7:
1408 percentage = le16_to_cpu(igp_info->usHDMISSPercentage); 1422 switch (id) {
1409 rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); 1423 case ASIC_INTERNAL_SS_ON_TMDS:
1424 percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
1425 rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
1426 break;
1427 case ASIC_INTERNAL_SS_ON_HDMI:
1428 percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
1429 rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
1430 break;
1431 case ASIC_INTERNAL_SS_ON_LVDS:
1432 percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
1433 rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
1434 break;
1435 }
1410 break; 1436 break;
1411 case ASIC_INTERNAL_SS_ON_LVDS: 1437 default:
1412 percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); 1438 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
1413 rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
1414 break; 1439 break;
1415 } 1440 }
1416 if (percentage) 1441 if (percentage)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 576f4f6919f2..f75247d42ffd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
719 return i2c; 719 return i2c;
720} 720}
721 721
722static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
723{
724 struct drm_device *dev = rdev->ddev;
725 struct radeon_i2c_bus_rec i2c;
726 u16 offset;
727 u8 id, blocks, clk, data;
728 int i;
729
730 i2c.valid = false;
731
732 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
733 if (offset) {
734 blocks = RBIOS8(offset + 2);
735 for (i = 0; i < blocks; i++) {
736 id = RBIOS8(offset + 3 + (i * 5) + 0);
737 if (id == 136) {
738 clk = RBIOS8(offset + 3 + (i * 5) + 3);
739 data = RBIOS8(offset + 3 + (i * 5) + 4);
740 /* gpiopad */
741 i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
742 (1 << clk), (1 << data));
743 break;
744 }
745 }
746 }
747 return i2c;
748}
749
722void radeon_combios_i2c_init(struct radeon_device *rdev) 750void radeon_combios_i2c_init(struct radeon_device *rdev)
723{ 751{
724 struct drm_device *dev = rdev->ddev; 752 struct drm_device *dev = rdev->ddev;
@@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
755 } else if (rdev->family == CHIP_RS300 || 783 } else if (rdev->family == CHIP_RS300 ||
756 rdev->family == CHIP_RS400 || 784 rdev->family == CHIP_RS400 ||
757 rdev->family == CHIP_RS480) { 785 rdev->family == CHIP_RS480) {
758 u16 offset;
759 u8 id, blocks, clk, data;
760 int i;
761
762 /* 0x68 */ 786 /* 0x68 */
763 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); 787 i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
764 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); 788 rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
765 789
766 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); 790 /* gpiopad */
767 if (offset) { 791 i2c = radeon_combios_get_i2c_info_from_table(rdev);
768 blocks = RBIOS8(offset + 2); 792 if (i2c.valid)
769 for (i = 0; i < blocks; i++) { 793 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
770 id = RBIOS8(offset + 3 + (i * 5) + 0);
771 if (id == 136) {
772 clk = RBIOS8(offset + 3 + (i * 5) + 3);
773 data = RBIOS8(offset + 3 + (i * 5) + 4);
774 /* gpiopad */
775 i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
776 (1 << clk), (1 << data));
777 rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
778 break;
779 }
780 }
781 }
782 } else if ((rdev->family == CHIP_R200) || 794 } else if ((rdev->family == CHIP_R200) ||
783 (rdev->family >= CHIP_R300)) { 795 (rdev->family >= CHIP_R300)) {
784 /* 0x68 */ 796 /* 0x68 */
@@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2321 connector = (tmp >> 12) & 0xf; 2333 connector = (tmp >> 12) & 0xf;
2322 2334
2323 ddc_type = (tmp >> 8) & 0xf; 2335 ddc_type = (tmp >> 8) & 0xf;
2324 ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); 2336 if (ddc_type == 5)
2337 ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
2338 else
2339 ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
2325 2340
2326 switch (connector) { 2341 switch (connector) {
2327 case CONNECTOR_PROPRIETARY_LEGACY: 2342 case CONNECTOR_PROPRIETARY_LEGACY:
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8a4c49ef0cc4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
278 return 0; 278 return 0;
279} 279}
280 280
281static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
282 struct radeon_fence *fence)
283{
284 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
285 struct radeon_vm *vm = &fpriv->vm;
286 struct radeon_bo_list *lobj;
287
288 if (parser->chunk_ib_idx == -1) {
289 return;
290 }
291 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
292 return;
293 }
294
295 list_for_each_entry(lobj, &parser->validated, tv.head) {
296 struct radeon_bo_va *bo_va;
297 struct radeon_bo *rbo = lobj->bo;
298
299 bo_va = radeon_bo_va(rbo, vm);
300 radeon_fence_unref(&bo_va->fence);
301 bo_va->fence = radeon_fence_ref(fence);
302 }
303}
304
281/** 305/**
282 * cs_parser_fini() - clean parser states 306 * cs_parser_fini() - clean parser states
283 * @parser: parser structure holding parsing context. 307 * @parser: parser structure holding parsing context.
@@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
290{ 314{
291 unsigned i; 315 unsigned i;
292 316
293 if (!error) 317 if (!error) {
 318 /* fence all bo va before ttm_eu_fence_buffer_objects so the bos are still reserved */
319 radeon_bo_vm_fence_va(parser, parser->ib.fence);
294 ttm_eu_fence_buffer_objects(&parser->validated, 320 ttm_eu_fence_buffer_objects(&parser->validated,
295 parser->ib.fence); 321 parser->ib.fence);
296 else 322 } else {
297 ttm_eu_backoff_reservation(&parser->validated); 323 ttm_eu_backoff_reservation(&parser->validated);
324 }
298 325
299 if (parser->relocs != NULL) { 326 if (parser->relocs != NULL) {
300 for (i = 0; i < parser->nrelocs; i++) { 327 for (i = 0; i < parser->nrelocs; i++) {
@@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
388 415
389 if (parser->chunk_ib_idx == -1) 416 if (parser->chunk_ib_idx == -1)
390 return 0; 417 return 0;
391
392 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) 418 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
393 return 0; 419 return 0;
394 420
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 711e95ad39bf..8794744cdf1a 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
67 67
68 if (ASIC_IS_DCE4(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); 69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); 70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
71 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
71 } else if (ASIC_IS_AVIVO(rdev)) { 72 } else if (ASIC_IS_AVIVO(rdev)) {
72 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 73 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
73 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 74 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
94 if (ASIC_IS_DCE4(rdev)) { 95 if (ASIC_IS_DCE4(rdev)) {
95 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); 96 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
96 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | 97 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
97 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); 98 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
99 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
98 } else if (ASIC_IS_AVIVO(rdev)) { 100 } else if (ASIC_IS_AVIVO(rdev)) {
99 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 101 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
100 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | 102 WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 742af8244e89..d2e243867ac6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev,
1009 atomic_set(&rdev->ih.lock, 0); 1009 atomic_set(&rdev->ih.lock, 0);
1010 mutex_init(&rdev->gem.mutex); 1010 mutex_init(&rdev->gem.mutex);
1011 mutex_init(&rdev->pm.mutex); 1011 mutex_init(&rdev->pm.mutex);
1012 mutex_init(&rdev->gpu_clock_mutex);
1012 init_rwsem(&rdev->pm.mclk_lock); 1013 init_rwsem(&rdev->pm.mclk_lock);
1013 init_rwsem(&rdev->exclusive_lock); 1014 init_rwsem(&rdev->exclusive_lock);
1014 init_waitqueue_head(&rdev->irq.vblank_queue); 1015 init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcea6f01ae4e..d7269f48d37c 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -59,9 +59,12 @@
59 * 2.15.0 - add max_pipes query 59 * 2.15.0 - add max_pipes query
60 * 2.16.0 - fix evergreen 2D tiled surface calculation 60 * 2.16.0 - fix evergreen 2D tiled surface calculation
61 * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx 61 * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
62 * 2.18.0 - r600-eg: allow "invalid" DB formats
63 * 2.19.0 - r600-eg: MSAA textures
64 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
62 */ 65 */
63#define KMS_DRIVER_MAJOR 2 66#define KMS_DRIVER_MAJOR 2
64#define KMS_DRIVER_MINOR 17 67#define KMS_DRIVER_MINOR 20
65#define KMS_DRIVER_PATCHLEVEL 0 68#define KMS_DRIVER_PATCHLEVEL 0
66int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 69int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
67int radeon_driver_unload_kms(struct drm_device *dev); 70int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b3720054614d..bb3b7fe05ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
814 return -EINVAL; 814 return -EINVAL;
815 } 815 }
816 816
817 if (bo_va->valid) 817 if (bo_va->valid && mem)
818 return 0; 818 return 0;
819 819
820 ngpu_pages = radeon_bo_ngpu_pages(bo); 820 ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
859 struct radeon_bo *bo) 859 struct radeon_bo *bo)
860{ 860{
861 struct radeon_bo_va *bo_va; 861 struct radeon_bo_va *bo_va;
862 int r;
862 863
863 bo_va = radeon_bo_va(bo, vm); 864 bo_va = radeon_bo_va(bo, vm);
864 if (bo_va == NULL) 865 if (bo_va == NULL)
865 return 0; 866 return 0;
866 867
868 /* wait for va use to end */
869 while (bo_va->fence) {
870 r = radeon_fence_wait(bo_va->fence, false);
871 if (r) {
872 DRM_ERROR("error while waiting for fence: %d\n", r);
873 }
874 if (r == -EDEADLK) {
875 r = radeon_gpu_reset(rdev);
876 if (!r)
877 continue;
878 }
879 break;
880 }
881 radeon_fence_unref(&bo_va->fence);
882
867 mutex_lock(&rdev->vm_manager.lock); 883 mutex_lock(&rdev->vm_manager.lock);
868 mutex_lock(&vm->mutex); 884 mutex_lock(&vm->mutex);
869 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 885 radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
@@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
934} 950}
935 951
936/** 952/**
937 * radeon_vm_init - tear down a vm instance 953 * radeon_vm_fini - tear down a vm instance
938 * 954 *
939 * @rdev: radeon_device pointer 955 * @rdev: radeon_device pointer
940 * @vm: requested vm 956 * @vm: requested vm
@@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
952 radeon_vm_unbind_locked(rdev, vm); 968 radeon_vm_unbind_locked(rdev, vm);
953 mutex_unlock(&rdev->vm_manager.lock); 969 mutex_unlock(&rdev->vm_manager.lock);
954 970
 955 /* remove all bo */ 971 /* remove all bo at this point, none are busy any more because unbind
972 * waited for the last vm fence to signal
973 */
956 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 974 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
957 if (!r) { 975 if (!r) {
958 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); 976 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
959 list_del_init(&bo_va->bo_list); 977 list_del_init(&bo_va->bo_list);
960 list_del_init(&bo_va->vm_list); 978 list_del_init(&bo_va->vm_list);
979 radeon_fence_unref(&bo_va->fence);
961 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 980 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
962 kfree(bo_va); 981 kfree(bo_va);
963 } 982 }
@@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
969 r = radeon_bo_reserve(bo_va->bo, false); 988 r = radeon_bo_reserve(bo_va->bo, false);
970 if (!r) { 989 if (!r) {
971 list_del_init(&bo_va->bo_list); 990 list_del_init(&bo_va->bo_list);
991 radeon_fence_unref(&bo_va->fence);
972 radeon_bo_unreserve(bo_va->bo); 992 radeon_bo_unreserve(bo_va->bo);
973 kfree(bo_va); 993 kfree(bo_va);
974 } 994 }
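
Together with the radeon_cs.c change earlier in this diff, each bo_va now carries its own fence: at command submission radeon_bo_vm_fence_va() references the IB fence for every validated BO (called before ttm_eu_fence_buffer_objects so the BOs are still reserved), and radeon_vm_bo_rmv() waits on that fence, retrying through a GPU reset on -EDEADLK, before tearing the mapping down. The VA range is therefore not recycled while the GPU may still be using it. Condensed sketch of the two ends of that lifecycle (taken from the hunks above, not compilable on its own):

    /* CS submission: BOs still reserved, remember the IB fence per mapping */
    radeon_fence_unref(&bo_va->fence);
    bo_va->fence = radeon_fence_ref(parser->ib.fence);

    /* Unmap: block until that fence signals before freeing the VA range */
    while (bo_va->fence && radeon_fence_wait(bo_va->fence, false) == -EDEADLK)
            if (radeon_gpu_reset(rdev))
                    break;          /* reset failed, stop waiting */
    radeon_fence_unref(&bo_va->fence);
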
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 84d045245739..1b57b0058ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
134 struct radeon_device *rdev = rbo->rdev; 134 struct radeon_device *rdev = rbo->rdev;
135 struct radeon_fpriv *fpriv = file_priv->driver_priv; 135 struct radeon_fpriv *fpriv = file_priv->driver_priv;
136 struct radeon_vm *vm = &fpriv->vm; 136 struct radeon_vm *vm = &fpriv->vm;
137 struct radeon_bo_va *bo_va, *tmp;
138 137
139 if (rdev->family < CHIP_CAYMAN) { 138 if (rdev->family < CHIP_CAYMAN) {
140 return; 139 return;
141 } 140 }
142 141
143 if (radeon_bo_reserve(rbo, false)) { 142 if (radeon_bo_reserve(rbo, false)) {
143 dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
144 return; 144 return;
145 } 145 }
146 list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) { 146 radeon_vm_bo_rmv(rdev, vm, rbo);
147 if (bo_va->vm == vm) {
148 /* remove from this vm address space */
149 mutex_lock(&vm->mutex);
150 list_del(&bo_va->vm_list);
151 mutex_unlock(&vm->mutex);
152 list_del(&bo_va->bo_list);
153 kfree(bo_va);
154 }
155 }
156 radeon_bo_unreserve(rbo); 147 radeon_bo_unreserve(rbo);
157} 148}
158 149
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 1d73f16b5d97..414b4acf6947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -29,6 +29,7 @@
29#include "drm_sarea.h" 29#include "drm_sarea.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32#include "radeon_asic.h"
32 33
33#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
@@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev,
167int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 168int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
168{ 169{
169 struct radeon_device *rdev = dev->dev_private; 170 struct radeon_device *rdev = dev->dev_private;
170 struct drm_radeon_info *info; 171 struct drm_radeon_info *info = data;
171 struct radeon_mode_info *minfo = &rdev->mode_info; 172 struct radeon_mode_info *minfo = &rdev->mode_info;
172 uint32_t *value_ptr; 173 uint32_t value, *value_ptr;
173 uint32_t value; 174 uint64_t value64, *value_ptr64;
174 struct drm_crtc *crtc; 175 struct drm_crtc *crtc;
175 int i, found; 176 int i, found;
176 177
177 info = data; 178 /* TIMESTAMP is a 64-bit value, needs special handling. */
179 if (info->request == RADEON_INFO_TIMESTAMP) {
180 if (rdev->family >= CHIP_R600) {
181 value_ptr64 = (uint64_t*)((unsigned long)info->value);
182 if (rdev->family >= CHIP_TAHITI) {
183 value64 = si_get_gpu_clock(rdev);
184 } else {
185 value64 = r600_get_gpu_clock(rdev);
186 }
187
188 if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
189 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
190 return -EFAULT;
191 }
192 return 0;
193 } else {
194 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
195 return -EINVAL;
196 }
197 }
198
178 value_ptr = (uint32_t *)((unsigned long)info->value); 199 value_ptr = (uint32_t *)((unsigned long)info->value);
179 if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) 200 if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
201 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
180 return -EFAULT; 202 return -EFAULT;
203 }
181 204
182 switch (info->request) { 205 switch (info->request) {
183 case RADEON_INFO_DEVICE_ID: 206 case RADEON_INFO_DEVICE_ID:
@@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
337 return -EINVAL; 360 return -EINVAL;
338 } 361 }
339 if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { 362 if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
340 DRM_ERROR("copy_to_user\n"); 363 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
341 return -EFAULT; 364 return -EFAULT;
342 } 365 }
343 return 0; 366 return 0;
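
RADEON_INFO_TIMESTAMP returns a 64-bit value, so it is special-cased before the usual 32-bit copy_from_user path; on r6xx+ it reports the RLC clock counter read by r600_get_gpu_clock()/si_get_gpu_clock(), and the KMS version bump to 2.20.0 advertises the query. A hypothetical userspace sketch, assuming libdrm's drmCommandWriteRead() and the RADEON_INFO_TIMESTAMP request id from the matching radeon_drm.h update (which is not part of this diff):

    /* Userspace sketch only; struct drm_radeon_info's value field is a user
     * pointer passed as a u64, which the kernel fills with the counter. */
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <radeon_drm.h>

    static int radeon_query_timestamp(int fd, uint64_t *timestamp)
    {
            struct drm_radeon_info info;

            memset(&info, 0, sizeof(info));
            info.request = RADEON_INFO_TIMESTAMP;   /* assumed request id */
            info.value = (uintptr_t)timestamp;      /* kernel copies 64 bits here */

            return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    }
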
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index d5fd615897ec..94b4a1c12893 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1025 1025
1026static void radeon_crtc_prepare(struct drm_crtc *crtc) 1026static void radeon_crtc_prepare(struct drm_crtc *crtc)
1027{ 1027{
1028 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1028 struct drm_device *dev = crtc->dev; 1029 struct drm_device *dev = crtc->dev;
1029 struct drm_crtc *crtci; 1030 struct drm_crtc *crtci;
1030 1031
1032 radeon_crtc->in_mode_set = true;
1031 /* 1033 /*
1032 * The hardware wedges sometimes if you reconfigure one CRTC 1034 * The hardware wedges sometimes if you reconfigure one CRTC
1033 * whilst another is running (see fdo bug #24611). 1035 * whilst another is running (see fdo bug #24611).
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
1038 1040
1039static void radeon_crtc_commit(struct drm_crtc *crtc) 1041static void radeon_crtc_commit(struct drm_crtc *crtc)
1040{ 1042{
1043 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1041 struct drm_device *dev = crtc->dev; 1044 struct drm_device *dev = crtc->dev;
1042 struct drm_crtc *crtci; 1045 struct drm_crtc *crtci;
1043 1046
@@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
1048 if (crtci->enabled) 1051 if (crtci->enabled)
1049 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); 1052 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
1050 } 1053 }
1054 radeon_crtc->in_mode_set = false;
1051} 1055}
1052 1056
1053static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1057static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f380d59c5763..d56978949f34 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -275,6 +275,7 @@ struct radeon_crtc {
275 u16 lut_r[256], lut_g[256], lut_b[256]; 275 u16 lut_r[256], lut_g[256], lut_b[256];
276 bool enabled; 276 bool enabled;
277 bool can_tile; 277 bool can_tile;
278 bool in_mode_set;
278 uint32_t crtc_offset; 279 uint32_t crtc_offset;
279 struct drm_gem_object *cursor_bo; 280 struct drm_gem_object *cursor_bo;
280 uint64_t cursor_addr; 281 uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f1a4c803c1d..1cb014b571ab 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
52 52
53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { 53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
54 /* remove from all vm address space */ 54 /* remove from all vm address space */
55 mutex_lock(&bo_va->vm->mutex); 55 radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
56 list_del(&bo_va->vm_list);
57 mutex_unlock(&bo_va->vm->mutex);
58 list_del(&bo_va->bo_list);
59 kfree(bo_va);
60 } 56 }
61} 57}
62 58
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index a12fbcc8ccb6..aa8ef491ef3c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
281 281
282void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) 282void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
283{ 283{
284 save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
285 save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
286 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); 284 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
287 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); 285 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
288 save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
289 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
290 286
291 /* Stop all video */ 287 /* Stop all video */
292 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 288 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
311 /* Unlock host access */ 307 /* Unlock host access */
312 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); 308 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
313 mdelay(1); 309 mdelay(1);
314 /* Restore video state */
315 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
316 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
317 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
318 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
319 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
320 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
321 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
322 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
323 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
324} 311}
325 312
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c053f8193771..0139e227e3c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev)
1639 /* XXX what about 12? */ 1639 /* XXX what about 12? */
1640 rdev->config.si.tile_config |= (3 << 0); 1640 rdev->config.si.tile_config |= (3 << 0);
1641 break; 1641 break;
1642 } 1642 }
1643 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) 1643 switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
1644 rdev->config.si.tile_config |= 1 << 4; 1644 case 0: /* four banks */
1645 else
1646 rdev->config.si.tile_config |= 0 << 4; 1645 rdev->config.si.tile_config |= 0 << 4;
1646 break;
1647 case 1: /* eight banks */
1648 rdev->config.si.tile_config |= 1 << 4;
1649 break;
1650 case 2: /* sixteen banks */
1651 default:
1652 rdev->config.si.tile_config |= 2 << 4;
1653 break;
1654 }
1647 rdev->config.si.tile_config |= 1655 rdev->config.si.tile_config |=
1648 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1656 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1649 rdev->config.si.tile_config |= 1657 rdev->config.si.tile_config |=
@@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev)
3960 rdev->bios = NULL; 3968 rdev->bios = NULL;
3961} 3969}
3962 3970
3971/**
3972 * si_get_gpu_clock - return GPU clock counter snapshot
3973 *
3974 * @rdev: radeon_device pointer
3975 *
3976 * Fetches a GPU clock counter snapshot (SI).
3977 * Returns the 64 bit clock counter snapshot.
3978 */
3979uint64_t si_get_gpu_clock(struct radeon_device *rdev)
3980{
3981 uint64_t clock;
3982
3983 mutex_lock(&rdev->gpu_clock_mutex);
3984 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3985 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
3986 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3987 mutex_unlock(&rdev->gpu_clock_mutex);
3988 return clock;
3989}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7869089e8761..ef4815c27b1c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -698,6 +698,9 @@
698#define RLC_UCODE_ADDR 0xC32C 698#define RLC_UCODE_ADDR 0xC32C
699#define RLC_UCODE_DATA 0xC330 699#define RLC_UCODE_DATA 0xC330
700 700
701#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
702#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
703#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
701#define RLC_MC_CNTL 0xC344 704#define RLC_MC_CNTL 0xC344
702#define RLC_UCODE_CNTL 0xC348 705#define RLC_UCODE_CNTL 0xC348
703 706
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 7bd65bdd15a8..291ecc145585 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
308 /* need to attach */ 308 /* need to attach */
309 attach = dma_buf_attach(dma_buf, dev->dev); 309 attach = dma_buf_attach(dma_buf, dev->dev);
310 if (IS_ERR(attach)) 310 if (IS_ERR(attach))
311 return ERR_PTR(PTR_ERR(attach)); 311 return ERR_CAST(attach);
312 312
313 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); 313 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
314 if (IS_ERR(sg)) { 314 if (IS_ERR(sg)) {
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdfc9b2a..b64502dfa9f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev)
296 } else 296 } else
297 dma_pdev = pci_dev_get(pdev); 297 dma_pdev = pci_dev_get(pdev);
298 298
299 /* Account for quirked devices */
299 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 300 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
300 301
302 /*
303 * If it's a multifunction device that does not support our
304 * required ACS flags, add to the same group as function 0.
305 */
301 if (dma_pdev->multifunction && 306 if (dma_pdev->multifunction &&
302 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) 307 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
303 swap_pci_ref(&dma_pdev, 308 swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev)
305 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 310 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
306 0))); 311 0)));
307 312
313 /*
314 * Devices on the root bus go through the iommu. If that's not us,
315 * find the next upstream device and test ACS up to the root bus.
316 * Finding the next device may require skipping virtual buses.
317 */
308 while (!pci_is_root_bus(dma_pdev->bus)) { 318 while (!pci_is_root_bus(dma_pdev->bus)) {
309 if (pci_acs_path_enabled(dma_pdev->bus->self, 319 struct pci_bus *bus = dma_pdev->bus;
310 NULL, REQ_ACS_FLAGS)) 320
321 while (!bus->self) {
322 if (!pci_is_root_bus(bus))
323 bus = bus->parent;
324 else
325 goto root_bus;
326 }
327
328 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
311 break; 329 break;
312 330
313 swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); 331 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
314 } 332 }
315 333
334root_bus:
316 group = iommu_group_get(&dma_pdev->dev); 335 group = iommu_group_get(&dma_pdev->dev);
317 pci_dev_put(dma_pdev); 336 pci_dev_put(dma_pdev);
318 if (!group) { 337 if (!group) {
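
The old loop passed dma_pdev->bus->self straight to pci_acs_path_enabled(), but a bus with no physical upstream bridge (an SR-IOV virtual bus, for instance) has bus->self == NULL. The new inner loop climbs bus->parent past such buses and, if it reaches the root bus without finding a real bridge, treats the device as root-bus-attached; intel-iommu.c below receives the identical change. A hypothetical helper equivalent to that inner loop:

    /* Find the first physical upstream bridge, skipping virtual buses
     * (bus->self == NULL); NULL means the device effectively sits on the
     * root bus. Kernel context assumed. */
    static struct pci_dev *upstream_bridge_skip_virtual(struct pci_bus *bus)
    {
            while (!bus->self) {
                    if (pci_is_root_bus(bus))
                            return NULL;
                    bus = bus->parent;
            }
            return bus->self;
    }
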
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f15f5c2..0a2ea317120a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void)
1131 break; 1131 break;
1132 } 1132 }
1133 1133
1134 /* Make sure ACS will be enabled */
1135 pci_request_acs();
1136
1137 ret = amd_iommu_init_devices(); 1134 ret = amd_iommu_init_devices();
1138 1135
1139 print_iommu_info(); 1136 print_iommu_info();
@@ -1652,6 +1649,9 @@ static bool detect_ivrs(void)
1652 1649
1653 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); 1650 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
1654 1651
1652 /* Make sure ACS will be enabled during PCI probe */
1653 pci_request_acs();
1654
1655 return true; 1655 return true;
1656} 1656}
1657 1657
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff5e93c..80bad32aa463 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
732 spin_lock_init(&priv->pgtablelock); 732 spin_lock_init(&priv->pgtablelock);
733 INIT_LIST_HEAD(&priv->clients); 733 INIT_LIST_HEAD(&priv->clients);
734 734
735 dom->geometry.aperture_start = 0; 735 domain->geometry.aperture_start = 0;
736 dom->geometry.aperture_end = ~0UL; 736 domain->geometry.aperture_end = ~0UL;
737 dom->geometry.force_aperture = true; 737 domain->geometry.force_aperture = true;
738 738
739 domain->priv = priv; 739 domain->priv = priv;
740 return 0; 740 return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b5346643..2297ec193eb4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
2008 if (!drhd) { 2008 if (!drhd) {
2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", 2009 printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
2010 pci_name(pdev)); 2010 pci_name(pdev));
2011 free_domain_mem(domain);
2011 return NULL; 2012 return NULL;
2012 } 2013 }
2013 iommu = drhd->iommu; 2014 iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev)
4124 } else 4125 } else
4125 dma_pdev = pci_dev_get(pdev); 4126 dma_pdev = pci_dev_get(pdev);
4126 4127
4128 /* Account for quirked devices */
4127 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); 4129 swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
4128 4130
4131 /*
4132 * If it's a multifunction device that does not support our
4133 * required ACS flags, add to the same group as function 0.
4134 */
4129 if (dma_pdev->multifunction && 4135 if (dma_pdev->multifunction &&
4130 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) 4136 !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
4131 swap_pci_ref(&dma_pdev, 4137 swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev)
4133 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 4139 PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
4134 0))); 4140 0)));
4135 4141
4142 /*
4143 * Devices on the root bus go through the iommu. If that's not us,
4144 * find the next upstream device and test ACS up to the root bus.
4145 * Finding the next device may require skipping virtual buses.
4146 */
4136 while (!pci_is_root_bus(dma_pdev->bus)) { 4147 while (!pci_is_root_bus(dma_pdev->bus)) {
4137 if (pci_acs_path_enabled(dma_pdev->bus->self, 4148 struct pci_bus *bus = dma_pdev->bus;
4138 NULL, REQ_ACS_FLAGS)) 4149
4150 while (!bus->self) {
4151 if (!pci_is_root_bus(bus))
4152 bus = bus->parent;
4153 else
4154 goto root_bus;
4155 }
4156
4157 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
4139 break; 4158 break;
4140 4159
4141 swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); 4160 swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
4142 } 4161 }
4143 4162
4163root_bus:
4144 group = iommu_group_get(&dma_pdev->dev); 4164 group = iommu_group_get(&dma_pdev->dev);
4145 pci_dev_put(dma_pdev); 4165 pci_dev_put(dma_pdev);
4146 if (!group) { 4166 if (!group) {
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325ab6262..2a4bb36bc688 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
799 goto out; 799 goto out;
800 } 800 }
801 } 801 }
802 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); 802 dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
803out: 803out:
804 spin_unlock(&as->client_lock); 804 spin_unlock(&as->client_lock);
805} 805}
806 806
807static int smmu_iommu_domain_init(struct iommu_domain *domain) 807static int smmu_iommu_domain_init(struct iommu_domain *domain)
808{ 808{
809 int i, err = -ENODEV; 809 int i, err = -EAGAIN;
810 unsigned long flags; 810 unsigned long flags;
811 struct smmu_as *as; 811 struct smmu_as *as;
812 struct smmu_device *smmu = smmu_handle; 812 struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
814 /* Look for a free AS with lock held */ 814 /* Look for a free AS with lock held */
815 for (i = 0; i < smmu->num_as; i++) { 815 for (i = 0; i < smmu->num_as; i++) {
816 as = &smmu->as[i]; 816 as = &smmu->as[i];
817 if (!as->pdir_page) { 817
818 err = alloc_pdir(as); 818 if (as->pdir_page)
819 if (!err) 819 continue;
820 goto found; 820
821 } 821 err = alloc_pdir(as);
822 if (!err)
823 goto found;
824
822 if (err != -EAGAIN) 825 if (err != -EAGAIN)
823 break; 826 break;
824 } 827 }
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec644db3..baf2686aa8eb 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
16#include <linux/sched.h> 16#include <linux/sched.h>
17#include "isdnloop.h" 17#include "isdnloop.h"
18 18
19static char *revision = "$Revision: 1.11.6.7 $";
20static char *isdnloop_id = "loop0"; 19static char *isdnloop_id = "loop0";
21 20
22MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); 21MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
1494static int __init 1493static int __init
1495isdnloop_init(void) 1494isdnloop_init(void)
1496{ 1495{
1497 char *p;
1498 char rev[10];
1499
1500 if ((p = strchr(revision, ':'))) {
1501 strcpy(rev, p + 1);
1502 p = strchr(rev, '$');
1503 *p = 0;
1504 } else
1505 strcpy(rev, " ??? ");
1506 printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
1507
1508 if (isdnloop_id) 1496 if (isdnloop_id)
1509 return (isdnloop_addcard(isdnloop_id)); 1497 return (isdnloop_addcard(isdnloop_id));
1510 1498
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abca1407..949cabb88f1c 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
2222 InitWin(l2); 2222 InitWin(l2);
2223 l2->l2m.fsm = &l2fsm; 2223 l2->l2m.fsm = &l2fsm;
2224 if (test_bit(FLG_LAPB, &l2->flag) || 2224 if (test_bit(FLG_LAPB, &l2->flag) ||
2225 test_bit(FLG_PTP, &l2->flag) || 2225 test_bit(FLG_FIXED_TEI, &l2->flag) ||
2226 test_bit(FLG_LAPD_NET, &l2->flag)) 2226 test_bit(FLG_LAPD_NET, &l2->flag))
2227 l2->l2m.state = ST_L2_4; 2227 l2->l2m.state = ST_L2_4;
2228 else 2228 else
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6157cbbf4113..363975b3c925 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig,
224 struct led_classdev *led_cdev; 224 struct led_classdev *led_cdev;
225 225
226 led_cdev = list_entry(entry, struct led_classdev, trig_list); 226 led_cdev = list_entry(entry, struct led_classdev, trig_list);
227 led_set_brightness(led_cdev, brightness); 227 __led_set_brightness(led_cdev, brightness);
228 } 228 }
229 read_unlock(&trig->leddev_list_lock); 229 read_unlock(&trig->leddev_list_lock);
230} 230}
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 53bd136f1ef0..0ade6ebfc914 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led,
63 /* scale configuration */ 63 /* scale configuration */
64 addr = LP8788_ISINK_CTRL; 64 addr = LP8788_ISINK_CTRL;
65 mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET); 65 mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
66 val = cfg->scale << cfg->num; 66 val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
67 ret = lp8788_update_bits(led->lp, addr, mask, val); 67 ret = lp8788_update_bits(led->lp, addr, mask, val);
68 if (ret) 68 if (ret)
69 return ret; 69 return ret;
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 9ee12c28059a..771ea067e680 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
247 247
248 if (!cfg) { 248 if (!cfg) {
249 dev_err(&pdev->dev, "missing platform data\n"); 249 dev_err(&pdev->dev, "missing platform data\n");
250 goto err0; 250 return -ENODEV;
251 } 251 }
252 252
253 p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); 253 p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628b..c3bb304eca07 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
19#include <linux/mtd/map.h> 19#include <linux/mtd/map.h>
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sections.h>
22 23
23/****************************************************************************/ 24/****************************************************************************/
24 25
25extern char _ebss;
26
27struct map_info uclinux_ram_map = { 26struct map_info uclinux_ram_map = {
28 .name = "RAM", 27 .name = "RAM",
29 .phys = (unsigned long)&_ebss, 28 .phys = (unsigned long)__bss_stop,
30 .size = 0, 29 .size = 0,
31}; 30};
32 31
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09ed9079..cff6f023c03a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@ static int __init cops_module_init(void)
996 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", 996 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
997 cardname); 997 cardname);
998 cops_dev = cops_probe(-1); 998 cops_dev = cops_probe(-1);
999 if (IS_ERR(cops_dev)) 999 return PTR_RET(cops_dev);
1000 return PTR_ERR(cops_dev);
1001 return 0;
1002} 1000}
1003 1001
1004static void __exit cops_module_exit(void) 1002static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce3996d..b5782cdf0bca 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void)
1243 "ltpc: Autoprobing is not recommended for modules\n"); 1243 "ltpc: Autoprobing is not recommended for modules\n");
1244 1244
1245 dev_ltpc = ltpc_probe(); 1245 dev_ltpc = ltpc_probe();
1246 if (IS_ERR(dev_ltpc)) 1246 return PTR_RET(dev_ltpc);
1247 return PTR_ERR(dev_ltpc);
1248 return 0;
1249} 1247}
1250module_init(ltpc_module_init); 1248module_init(ltpc_module_init);
1251#endif 1249#endif
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd54ce29..021d69c5d9bc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
1712static void 1712static void
1713e100_netpoll(struct net_device* netdev) 1713e100_netpoll(struct net_device* netdev)
1714{ 1714{
1715 e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); 1715 e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
1716} 1716}
1717#endif 1717#endif
1718 1718
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 77bcd4cb4ffb..463b9ec57d80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1278,7 +1278,7 @@ struct bnx2x {
1278#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) 1278#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
1279 1279
1280#define BNX2X_FW_RX_ALIGN_END \ 1280#define BNX2X_FW_RX_ALIGN_END \
1281 max(1UL << BNX2X_RX_ALIGN_SHIFT, \ 1281 max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \
1282 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 1282 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
1283 1283
1284#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) 1284#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index dd451c3dd83d..02b5a343b195 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4041 return val != 0; 4041 return val != 0;
4042} 4042}
4043 4043
4044/*
4045 * Reset the load status for the current engine.
4046 */
4047static void bnx2x_clear_load_status(struct bnx2x *bp)
4048{
4049 u32 val;
4050 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4051 BNX2X_PATH0_LOAD_CNT_MASK);
4052 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4053 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4054 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
4055 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4056}
4057
4058static void _print_next_block(int idx, const char *blk) 4044static void _print_next_block(int idx, const char *blk)
4059{ 4045{
4060 pr_cont("%s%s", idx ? ", " : "", blk); 4046 pr_cont("%s%s", idx ? ", " : "", blk);
@@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
9384 return rc; 9370 return rc;
9385} 9371}
9386 9372
9387static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
9388{
9389 int pos;
9390 u32 cap;
9391 struct pci_dev *dev = bp->pdev;
9392
9393 pos = pci_pcie_cap(dev);
9394 if (!pos)
9395 return false;
9396
9397 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
9398 if (!(cap & PCI_EXP_DEVCAP_FLR))
9399 return false;
9400
9401 return true;
9402}
9403
9404static int __devinit bnx2x_do_flr(struct bnx2x *bp) 9373static int __devinit bnx2x_do_flr(struct bnx2x *bp)
9405{ 9374{
9406 int i, pos; 9375 int i, pos;
9407 u16 status; 9376 u16 status;
9408 struct pci_dev *dev = bp->pdev; 9377 struct pci_dev *dev = bp->pdev;
9409 9378
9410 /* probe the capability first */ 9379
9411 if (bnx2x_can_flr(bp)) 9380 if (CHIP_IS_E1x(bp)) {
9412 return -ENOTTY; 9381 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9382 return -EINVAL;
9383 }
9384
9385 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
9386 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9387 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9388 bp->common.bc_ver);
9389 return -EINVAL;
9390 }
9413 9391
9414 pos = pci_pcie_cap(dev); 9392 pos = pci_pcie_cap(dev);
9415 if (!pos) 9393 if (!pos)
@@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
9429 "transaction is not cleared; proceeding with reset anyway\n"); 9407 "transaction is not cleared; proceeding with reset anyway\n");
9430 9408
9431clear: 9409clear:
9432 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9433 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9434 bp->common.bc_ver);
9435 return -EINVAL;
9436 }
9437 9410
9411 BNX2X_DEV_INFO("Initiating FLR\n");
9438 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); 9412 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9439 9413
9440 return 0; 9414 return 0;
@@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9454 * the one required, then FLR will be sufficient to clean any residue 9428 * the one required, then FLR will be sufficient to clean any residue
9455 * left by previous driver 9429 * left by previous driver
9456 */ 9430 */
9457 if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) 9431 rc = bnx2x_test_firmware_version(bp, false);
9458 return bnx2x_do_flr(bp); 9432
9433 if (!rc) {
9434 /* fw version is good */
9435 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
9436 rc = bnx2x_do_flr(bp);
9437 }
9438
9439 if (!rc) {
9440 /* FLR was performed */
9441 BNX2X_DEV_INFO("FLR successful\n");
9442 return 0;
9443 }
9444
9445 BNX2X_DEV_INFO("Could not FLR\n");
9459 9446
9460 /* Close the MCP request, return failure*/ 9447 /* Close the MCP request, return failure*/
9461 rc = bnx2x_prev_mcp_done(bp); 9448 rc = bnx2x_prev_mcp_done(bp);
@@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11427 if (!chip_is_e1x) 11414 if (!chip_is_e1x)
11428 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 11415 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
11429 11416
11430 /* Reset the load counter */
11431 bnx2x_clear_load_status(bp);
11432
11433 dev->watchdog_timeo = TX_TIMEOUT; 11417 dev->watchdog_timeo = TX_TIMEOUT;
11434 11418
11435 dev->netdev_ops = &bnx2x_netdev_ops; 11419 dev->netdev_ops = &bnx2x_netdev_ops;
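As a reading aid for the reshuffled FLR logic above: bnx2x_do_flr() now refuses outright on E1/E1H chips and on bootcode older than the first version that can initiate FLR, and bnx2x_prev_unload_uncommon() only attempts FLR when the firmware version test passes, falling back to closing the MCP request otherwise. A compact stand-alone sketch of that decision flow (the constant value and helpers here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative placeholder value; the real REQ_BC_VER_4_INITIATE_FLR differs. */
#define REQ_BC_VER_4_INITIATE_FLR 0x070c00u

struct chip {
	bool is_e1x;
	unsigned int bc_ver;
	bool fw_matches;          /* result of the firmware version test */
};

/* Mirrors the reworked bnx2x_do_flr() checks: bail out early on unsupported
 * chips or bootcode before touching the PCIe capability. */
static int do_flr(const struct chip *c)
{
	if (c->is_e1x) {
		printf("FLR not supported in E1/E1H\n");
		return -1;
	}
	if (c->bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
		printf("FLR not supported by BC_VER 0x%x\n", c->bc_ver);
		return -1;
	}
	printf("Initiating FLR\n");
	return 0;
}

/* Mirrors the reworked prev-unload path: attempt FLR only when the running
 * firmware matches ours, otherwise close the MCP request. */
static int prev_unload_uncommon(const struct chip *c)
{
	int rc = c->fw_matches ? do_flr(c) : -1;

	if (rc == 0) {
		printf("FLR successful\n");
		return 0;
	}
	printf("Could not FLR, closing MCP request instead\n");
	return -1;
}

int main(void)
{
	struct chip e2 = { .is_e1x = false, .bc_ver = 0x070d00, .fw_matches = true };

	return prev_unload_uncommon(&e2) ? 1 : 0;
}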
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87cd990..62f754bd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2485 break; 2485 break;
2486 2486
2487 default: 2487 default:
2488 kfree(new_cmd);
2488 BNX2X_ERR("Unknown command: %d\n", cmd); 2489 BNX2X_ERR("Unknown command: %d\n", cmd);
2489 return -EINVAL; 2490 return -EINVAL;
2490 } 2491 }
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c60de89b6669..90a903d83d87 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1948 1948
1949 if (adapter->num_rx_qs != MAX_RX_QS) 1949 if (adapter->num_rx_qs != MAX_RX_QS)
1950 dev_info(&adapter->pdev->dev, 1950 dev_info(&adapter->pdev->dev,
1951 "Created only %d receive queues", adapter->num_rx_qs); 1951 "Created only %d receive queues\n", adapter->num_rx_qs);
1952 1952
1953 return 0; 1953 return 0;
1954} 1954}
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957fd..080c89093feb 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
999 **/ 999 **/
1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1001{ 1001{
1002 u32 ctrl, ctrl_ext, eecd; 1002 u32 ctrl, ctrl_ext, eecd, tctl;
1003 s32 ret_val; 1003 s32 ret_val;
1004 1004
1005 /* 1005 /*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1014 ew32(IMC, 0xffffffff); 1014 ew32(IMC, 0xffffffff);
1015 1015
1016 ew32(RCTL, 0); 1016 ew32(RCTL, 0);
1017 ew32(TCTL, E1000_TCTL_PSP); 1017 tctl = er32(TCTL);
1018 tctl &= ~E1000_TCTL_EN;
1019 ew32(TCTL, tctl);
1018 e1e_flush(); 1020 e1e_flush();
1019 1021
1020 usleep_range(10000, 20000); 1022 usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1601 * auto-negotiation in the TXCW register and disable 1603 * auto-negotiation in the TXCW register and disable
1602 * forced link in the Device Control register in an 1604 * forced link in the Device Control register in an
1603 * attempt to auto-negotiate with our link partner. 1605 * attempt to auto-negotiate with our link partner.
1604 * If the partner code word is null, stop forcing
1605 * and restart auto negotiation.
1606 */ 1606 */
1607 if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { 1607 if (rxcw & E1000_RXCW_C) {
1608 /* Enable autoneg, and unforce link up */ 1608 /* Enable autoneg, and unforce link up */
1609 ew32(TXCW, mac->txcw); 1609 ew32(TXCW, mac->txcw);
1610 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1610 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f17..46c3b1f9ff89 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
178 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); 178 pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
179} 179}
180 180
181static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
182 struct e1000_buffer *bi)
183{
184 int i;
185 struct e1000_ps_page *ps_page;
186
187 for (i = 0; i < adapter->rx_ps_pages; i++) {
188 ps_page = &bi->ps_pages[i];
189
190 if (ps_page->page) {
191 pr_info("packet dump for ps_page %d:\n", i);
192 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
193 16, 1, page_address(ps_page->page),
194 PAGE_SIZE, true);
195 }
196 }
197}
198
181/* 199/*
182 * e1000e_dump - Print registers, Tx-ring and Rx-ring 200 * e1000e_dump - Print registers, Tx-ring and Rx-ring
183 */ 201 */
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
299 (unsigned long long)buffer_info->time_stamp, 317 (unsigned long long)buffer_info->time_stamp,
300 buffer_info->skb, next_desc); 318 buffer_info->skb, next_desc);
301 319
302 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 320 if (netif_msg_pktdata(adapter) && buffer_info->skb)
303 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 321 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
304 16, 1, phys_to_virt(buffer_info->dma), 322 16, 1, buffer_info->skb->data,
305 buffer_info->length, true); 323 buffer_info->skb->len, true);
306 } 324 }
307 325
308 /* Print Rx Ring Summary */ 326 /* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
381 buffer_info->skb, next_desc); 399 buffer_info->skb, next_desc);
382 400
383 if (netif_msg_pktdata(adapter)) 401 if (netif_msg_pktdata(adapter))
384 print_hex_dump(KERN_INFO, "", 402 e1000e_dump_ps_pages(adapter,
385 DUMP_PREFIX_ADDRESS, 16, 1, 403 buffer_info);
386 phys_to_virt(buffer_info->dma),
387 adapter->rx_ps_bsize0, true);
388 } 404 }
389 } 405 }
390 break; 406 break;
@@ -444,12 +460,12 @@ rx_ring_summary:
444 (unsigned long long)buffer_info->dma, 460 (unsigned long long)buffer_info->dma,
445 buffer_info->skb, next_desc); 461 buffer_info->skb, next_desc);
446 462
447 if (netif_msg_pktdata(adapter)) 463 if (netif_msg_pktdata(adapter) &&
464 buffer_info->skb)
448 print_hex_dump(KERN_INFO, "", 465 print_hex_dump(KERN_INFO, "",
449 DUMP_PREFIX_ADDRESS, 16, 466 DUMP_PREFIX_ADDRESS, 16,
450 1, 467 1,
451 phys_to_virt 468 buffer_info->skb->data,
452 (buffer_info->dma),
453 adapter->rx_buffer_len, 469 adapter->rx_buffer_len,
454 true); 470 true);
455 } 471 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c1..ba994fb4cec6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
254 */ 254 */
255 size += NVM_WORD_SIZE_BASE_SHIFT; 255 size += NVM_WORD_SIZE_BASE_SHIFT;
256 256
257 /*
258 * Check for invalid size
259 */
260 if ((hw->mac.type == e1000_82576) && (size > 15)) {
261 pr_notice("The NVM size is not valid, defaulting to 32K\n");
262 size = 15;
263 }
264
257 nvm->word_size = 1 << size; 265 nvm->word_size = 1 << size;
258 if (hw->mac.type < e1000_i210) { 266 if (hw->mac.type < e1000_i210) {
259 nvm->opcode_bits = 8; 267 nvm->opcode_bits = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
281 } else 289 } else
282 nvm->type = e1000_nvm_flash_hw; 290 nvm->type = e1000_nvm_flash_hw;
283 291
284 /*
285 * Check for invalid size
286 */
287 if ((hw->mac.type == e1000_82576) && (size > 15)) {
288 pr_notice("The NVM size is not valid, defaulting to 32K\n");
289 size = 15;
290 }
291
292 /* NVM Function Pointers */ 292 /* NVM Function Pointers */
293 switch (hw->mac.type) { 293 switch (hw->mac.type) {
294 case e1000_82580: 294 case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88dca0..28394bea5253 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
156 : (0x0E018 + ((_n) * 0x40))) 156 : (0x0E018 + ((_n) * 0x40)))
157#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ 157#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
158 : (0x0E028 + ((_n) * 0x40))) 158 : (0x0E028 + ((_n) * 0x40)))
159#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) 159#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
160#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) 160 (0x0C014 + ((_n) * 0x40)))
161#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
162#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
163 (0x0E014 + ((_n) * 0x40)))
164#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
161#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ 165#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
162 : (0x0E038 + ((_n) * 0x40))) 166 : (0x0E038 + ((_n) * 0x40)))
163#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ 167#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
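The rewritten DCA control macros above now pick between two register banks: queues 0-3 live at 0x02814/0x03814 with a 0x100 stride, queues 4 and up at 0x0C014/0x0E014 with a 0x40 stride, whereas the old definitions only computed addresses in the first bank. A quick stand-alone check of the offsets the new macros expand to:

#include <stdio.h>

/* Mirrors the new E1000_RXCTL()/E1000_TXCTL() definitions in the hunk above. */
static unsigned int rxctl(unsigned int n)
{
	return n < 4 ? 0x02814 + n * 0x100 : 0x0C014 + n * 0x40;
}

static unsigned int txctl(unsigned int n)
{
	return n < 4 ? 0x03814 + n * 0x100 : 0x0E014 + n * 0x40;
}

int main(void)
{
	for (unsigned int n = 0; n < 8; n++)
		printf("queue %u: RXCTL 0x%05X  TXCTL 0x%05X\n",
		       n, rxctl(n), txctl(n));
	return 0;
}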
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e9..70591117051b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
209 /* When SoL/IDER sessions are active, autoneg/speed/duplex 209 /* When SoL/IDER sessions are active, autoneg/speed/duplex
210 * cannot be changed */ 210 * cannot be changed */
211 if (igb_check_reset_block(hw)) { 211 if (igb_check_reset_block(hw)) {
212 dev_err(&adapter->pdev->dev, "Cannot change link " 212 dev_err(&adapter->pdev->dev,
213 "characteristics when SoL/IDER is active.\n"); 213 "Cannot change link characteristics when SoL/IDER is active.\n");
214 return -EINVAL; 214 return -EINVAL;
215 } 215 }
216 216
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
1089 wr32(reg, (_test[pat] & write)); 1089 wr32(reg, (_test[pat] & write));
1090 val = rd32(reg) & mask; 1090 val = rd32(reg) & mask;
1091 if (val != (_test[pat] & write & mask)) { 1091 if (val != (_test[pat] & write & mask)) {
1092 dev_err(&adapter->pdev->dev, "pattern test reg %04X " 1092 dev_err(&adapter->pdev->dev,
1093 "failed: got 0x%08X expected 0x%08X\n", 1093 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
1094 reg, val, (_test[pat] & write & mask)); 1094 reg, val, (_test[pat] & write & mask));
1095 *data = reg; 1095 *data = reg;
1096 return 1; 1096 return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
1108 wr32(reg, write & mask); 1108 wr32(reg, write & mask);
1109 val = rd32(reg); 1109 val = rd32(reg);
1110 if ((write & mask) != (val & mask)) { 1110 if ((write & mask) != (val & mask)) {
1111 dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" 1111 dev_err(&adapter->pdev->dev,
1112 " got 0x%08X expected 0x%08X\n", reg, 1112 "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
1113 (val & mask), (write & mask)); 1113 (val & mask), (write & mask));
1114 *data = reg; 1114 *data = reg;
1115 return 1; 1115 return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1171 wr32(E1000_STATUS, toggle); 1171 wr32(E1000_STATUS, toggle);
1172 after = rd32(E1000_STATUS) & toggle; 1172 after = rd32(E1000_STATUS) & toggle;
1173 if (value != after) { 1173 if (value != after) {
1174 dev_err(&adapter->pdev->dev, "failed STATUS register test " 1174 dev_err(&adapter->pdev->dev,
1175 "got: 0x%08X expected: 0x%08X\n", after, value); 1175 "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
1176 after, value);
1176 *data = 1; 1177 *data = 1;
1177 return 1; 1178 return 1;
1178 } 1179 }
@@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1497 break; 1498 break;
1498 } 1499 }
1499 1500
1501 /* add small delay to avoid loopback test failure */
1502 msleep(50);
1503
1500 /* force 1000, set loopback */ 1504 /* force 1000, set loopback */
1501 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); 1505 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1502 1506
@@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1777 * sessions are active */ 1781 * sessions are active */
1778 if (igb_check_reset_block(&adapter->hw)) { 1782 if (igb_check_reset_block(&adapter->hw)) {
1779 dev_err(&adapter->pdev->dev, 1783 dev_err(&adapter->pdev->dev,
1780 "Cannot do PHY loopback test " 1784 "Cannot do PHY loopback test when SoL/IDER is active.\n");
1781 "when SoL/IDER is active.\n");
1782 *data = 0; 1785 *data = 0;
1783 goto out; 1786 goto out;
1784 } 1787 }
1785 if ((adapter->hw.mac.type == e1000_i210) 1788 if ((adapter->hw.mac.type == e1000_i210)
1786 || (adapter->hw.mac.type == e1000_i210)) { 1789 || (adapter->hw.mac.type == e1000_i211)) {
1787 dev_err(&adapter->pdev->dev, 1790 dev_err(&adapter->pdev->dev,
1788 "Loopback test not supported " 1791 "Loopback test not supported on this part at this time.\n");
1789 "on this part at this time.\n");
1790 *data = 0; 1792 *data = 0;
1791 goto out; 1793 goto out;
1792 } 1794 }
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b7c2d5050572..48cc4fb1a307 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
462 (u64)buffer_info->time_stamp, 462 (u64)buffer_info->time_stamp,
463 buffer_info->skb, next_desc); 463 buffer_info->skb, next_desc);
464 464
465 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) 465 if (netif_msg_pktdata(adapter) && buffer_info->skb)
466 print_hex_dump(KERN_INFO, "", 466 print_hex_dump(KERN_INFO, "",
467 DUMP_PREFIX_ADDRESS, 467 DUMP_PREFIX_ADDRESS,
468 16, 1, phys_to_virt(buffer_info->dma), 468 16, 1, buffer_info->skb->data,
469 buffer_info->length, true); 469 buffer_info->length, true);
470 } 470 }
471 } 471 }
@@ -547,18 +547,17 @@ rx_ring_summary:
547 (u64)buffer_info->dma, 547 (u64)buffer_info->dma,
548 buffer_info->skb, next_desc); 548 buffer_info->skb, next_desc);
549 549
550 if (netif_msg_pktdata(adapter)) { 550 if (netif_msg_pktdata(adapter) &&
551 buffer_info->dma && buffer_info->skb) {
551 print_hex_dump(KERN_INFO, "", 552 print_hex_dump(KERN_INFO, "",
552 DUMP_PREFIX_ADDRESS, 553 DUMP_PREFIX_ADDRESS,
553 16, 1, 554 16, 1, buffer_info->skb->data,
554 phys_to_virt(buffer_info->dma), 555 IGB_RX_HDR_LEN, true);
555 IGB_RX_HDR_LEN, true);
556 print_hex_dump(KERN_INFO, "", 556 print_hex_dump(KERN_INFO, "",
557 DUMP_PREFIX_ADDRESS, 557 DUMP_PREFIX_ADDRESS,
558 16, 1, 558 16, 1,
559 phys_to_virt( 559 page_address(buffer_info->page) +
560 buffer_info->page_dma + 560 buffer_info->page_offset,
561 buffer_info->page_offset),
562 PAGE_SIZE/2, true); 561 PAGE_SIZE/2, true);
563 } 562 }
564 } 563 }
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137501da..18bf08c9d7a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
804 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 804 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
805 /* Set KX4/KX/KR support according to speed requested */ 805 /* Set KX4/KX/KR support according to speed requested */
806 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 806 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
807 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 807 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
808 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 808 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
809 autoc |= IXGBE_AUTOC_KX4_SUPP; 809 autoc |= IXGBE_AUTOC_KX4_SUPP;
810 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && 810 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
811 (hw->phy.smart_speed_active == false)) 811 (hw->phy.smart_speed_active == false))
812 autoc |= IXGBE_AUTOC_KR_SUPP; 812 autoc |= IXGBE_AUTOC_KR_SUPP;
813 }
813 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 814 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
814 autoc |= IXGBE_AUTOC_KX_SUPP; 815 autoc |= IXGBE_AUTOC_KX_SUPP;
815 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 816 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e70300770..5aba5ecdf1e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
614 /* If source MAC is equal to our own MAC and not performing 614 /* If source MAC is equal to our own MAC and not performing
615 * the selftest or flb disabled - drop the packet */ 615 * the selftest or flb disabled - drop the packet */
616 if (s_mac == priv->mac && 616 if (s_mac == priv->mac &&
617 (!(dev->features & NETIF_F_LOOPBACK) || 617 !((dev->features & NETIF_F_LOOPBACK) ||
618 !priv->validate_loopback)) 618 priv->validate_loopback))
619 goto next; 619 goto next;
620 620
621 /* 621 /*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856b1334..10bba09c44ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
164 ring->cons = 0xffffffff; 164 ring->cons = 0xffffffff;
165 ring->last_nr_txbb = 1; 165 ring->last_nr_txbb = 1;
166 ring->poll_cnt = 0; 166 ring->poll_cnt = 0;
167 ring->blocked = 0;
168 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); 167 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
169 memset(ring->buf, 0, ring->buf_size); 168 memset(ring->buf, 0, ring->buf_size);
170 169
@@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
365 ring->cons += txbbs_skipped; 364 ring->cons += txbbs_skipped;
366 netdev_tx_completed_queue(ring->tx_queue, packets, bytes); 365 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
367 366
368 /* Wakeup Tx queue if this ring stopped it */ 367 /*
369 if (unlikely(ring->blocked)) { 368 * Wakeup Tx queue if this stopped, and at least 1 packet
370 if ((u32) (ring->prod - ring->cons) <= 369 * was completed
371 ring->size - HEADROOM - MAX_DESC_TXBBS) { 370 */
372 ring->blocked = 0; 371 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
373 netif_tx_wake_queue(ring->tx_queue); 372 netif_tx_wake_queue(ring->tx_queue);
374 priv->port_stats.wake_queue++; 373 priv->port_stats.wake_queue++;
375 }
376 } 374 }
377} 375}
378 376
@@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
592 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 590 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
593 /* every full Tx ring stops queue */ 591 /* every full Tx ring stops queue */
594 netif_tx_stop_queue(ring->tx_queue); 592 netif_tx_stop_queue(ring->tx_queue);
595 ring->blocked = 1;
596 priv->port_stats.queue_stopped++; 593 priv->port_stats.queue_stopped++;
597 594
598 return NETDEV_TX_BUSY; 595 return NETDEV_TX_BUSY;
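The mlx4 change above drops the ring->blocked bookkeeping and instead wakes the queue whenever it is currently stopped and at least one TX completion was processed. A much-simplified, hypothetical model of that stop/wake interaction (field names and the headroom value are illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, much-simplified model of the ring accounting above. */
struct tx_ring {
	unsigned int prod, cons, size;
	bool stopped;             /* models netif_tx_queue_stopped() */
};

static bool ring_full(const struct tx_ring *r, unsigned int headroom)
{
	return r->prod - r->cons > r->size - headroom;
}

/* Transmit path: stop the queue when the ring has no room left. */
static void xmit(struct tx_ring *r, unsigned int headroom)
{
	r->prod++;
	if (ring_full(r, headroom))
		r->stopped = true;
}

/* Completion path: wake as soon as the queue is stopped and some work
 * completed, instead of tracking a separate "blocked" flag as before. */
static void complete(struct tx_ring *r, unsigned int done)
{
	r->cons += done;
	if (r->stopped && done > 0) {
		r->stopped = false;
		printf("queue woken at cons=%u\n", r->cons);
	}
}

int main(void)
{
	struct tx_ring r = { .prod = 0, .cons = 0, .size = 8, .stopped = false };

	for (int i = 0; i < 8; i++)
		xmit(&r, 2);      /* 2 stands in for HEADROOM + MAX_DESC_TXBBS */
	printf("stopped=%d after filling the ring\n", r.stopped);
	complete(&r, 4);
	return 0;
}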
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 48d0e90194cb..827b72dfce99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
157 "on this HCA, aborting.\n"); 157 "on this HCA, aborting.\n");
158 return -EINVAL; 158 return -EINVAL;
159 } 159 }
160 if (port_type[i] == MLX4_PORT_TYPE_ETH &&
161 port_type[i + 1] == MLX4_PORT_TYPE_IB)
162 return -EINVAL;
163 } 160 }
164 } 161 }
165 162
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab105debc..9d27e42264e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@ struct mlx4_en_tx_ring {
248 u32 doorbell_qpn; 248 u32 doorbell_qpn;
249 void *buf; 249 void *buf;
250 u16 poll_cnt; 250 u16 poll_cnt;
251 int blocked;
252 struct mlx4_en_tx_info *tx_info; 251 struct mlx4_en_tx_info *tx_info;
253 u8 *bounce_buf; 252 u8 *bounce_buf;
254 u32 last_nr_txbb; 253 u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 802498293528..34ee09bae36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
81 } 81 }
82 82
83 /* 83 /*
84 * Adjust port configuration:
85 * If port 1 sensed nothing and port 2 is IB, set both as IB
86 * If port 2 sensed nothing and port 1 is Eth, set both as Eth
87 */
88 if (stype[0] == MLX4_PORT_TYPE_ETH) {
89 for (i = 1; i < dev->caps.num_ports; i++)
90 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
91 }
92 if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
93 for (i = 0; i < dev->caps.num_ports - 1; i++)
94 stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
95 }
96
97 /*
98 * If sensed nothing, remain in current configuration. 84 * If sensed nothing, remain in current configuration.
99 */ 85 */
100 for (i = 0; i < dev->caps.num_ports; i++) 86 for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4069edab229e..53743f7a2ca9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev)
346 "phy-mode", NULL); 346 "phy-mode", NULL);
347 if (mode && !strcmp(mode, "mii")) 347 if (mode && !strcmp(mode, "mii"))
348 return PHY_INTERFACE_MODE_MII; 348 return PHY_INTERFACE_MODE_MII;
349 return PHY_INTERFACE_MODE_RMII;
350 } 349 }
351
352 /* non-DT */
353#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
354 return PHY_INTERFACE_MODE_MII;
355#else
356 return PHY_INTERFACE_MODE_RMII; 350 return PHY_INTERFACE_MODE_RMII;
357#endif
358} 351}
359 352
360static bool use_iram_for_net(struct device *dev) 353static bool use_iram_for_net(struct device *dev)
361{ 354{
362 if (dev && dev->of_node) 355 if (dev && dev->of_node)
363 return of_property_read_bool(dev->of_node, "use-iram"); 356 return of_property_read_bool(dev->of_node, "use-iram");
364
365 /* non-DT */
366#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
367 return true;
368#else
369 return false; 357 return false;
370#endif
371} 358}
372 359
373/* Receive Status information word */ 360/* Receive Status information word */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
1503 goto fail2; 1503 goto fail2;
1504 } 1504 }
1505 1505
1506 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1507 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1508 rc = -EINVAL;
1509 goto fail3;
1510 }
1506 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; 1511 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1507 1512
1508 rc = efx_probe_filters(efx); 1513 rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
2070 net_dev->irq = efx->pci_dev->irq; 2075 net_dev->irq = efx->pci_dev->irq;
2071 net_dev->netdev_ops = &efx_netdev_ops; 2076 net_dev->netdev_ops = &efx_netdev_ops;
2072 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 2077 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
2078 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2073 2079
2074 rtnl_lock(); 2080 rtnl_lock();
2075 2081
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
30efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 30efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
31extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 31extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
32extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); 32extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
33 34
34/* RX */ 35/* RX */
35extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 36extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
52#define EFX_MAX_EVQ_SIZE 16384UL 53#define EFX_MAX_EVQ_SIZE 16384UL
53#define EFX_MIN_EVQ_SIZE 512UL 54#define EFX_MIN_EVQ_SIZE 512UL
54 55
55/* The smallest [rt]xq_entries that the driver supports. Callers of 56/* Maximum number of TCP segments we support for soft-TSO */
56 * efx_wake_queue() assume that they can subsequently send at least one 57#define EFX_TSO_MAX_SEGS 100
57 * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ 58
58#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) 59/* The smallest [rt]xq_entries that the driver supports. RX minimum
60 * is a bit arbitrary. For TX, we must have space for at least 2
61 * TSO skbs.
62 */
63#define EFX_RXQ_MIN_ENT 128U
64#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
59 65
60/* Filters */ 66/* Filters */
61extern int efx_probe_filters(struct efx_nic *efx); 67extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..8cba2df82b18 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
680 struct ethtool_ringparam *ring) 680 struct ethtool_ringparam *ring)
681{ 681{
682 struct efx_nic *efx = netdev_priv(net_dev); 682 struct efx_nic *efx = netdev_priv(net_dev);
683 u32 txq_entries;
683 684
684 if (ring->rx_mini_pending || ring->rx_jumbo_pending || 685 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
685 ring->rx_pending > EFX_MAX_DMAQ_SIZE || 686 ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
686 ring->tx_pending > EFX_MAX_DMAQ_SIZE) 687 ring->tx_pending > EFX_MAX_DMAQ_SIZE)
687 return -EINVAL; 688 return -EINVAL;
688 689
689 if (ring->rx_pending < EFX_MIN_RING_SIZE || 690 if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
690 ring->tx_pending < EFX_MIN_RING_SIZE) {
691 netif_err(efx, drv, efx->net_dev, 691 netif_err(efx, drv, efx->net_dev,
692 "TX and RX queues cannot be smaller than %ld\n", 692 "RX queues cannot be smaller than %u\n",
693 EFX_MIN_RING_SIZE); 693 EFX_RXQ_MIN_ENT);
694 return -EINVAL; 694 return -EINVAL;
695 } 695 }
696 696
697 return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); 697 txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
698 if (txq_entries != ring->tx_pending)
699 netif_warn(efx, drv, efx->net_dev,
700 "increasing TX queue size to minimum of %u\n",
701 txq_entries);
702
703 return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
698} 704}
699 705
700static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 706static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
119 return len; 119 return len;
120} 120}
121 121
122unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
123{
124 /* Header and payload descriptor for each output segment, plus
125 * one for every input fragment boundary within a segment
126 */
127 unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
128
129 /* Possibly one more per segment for the alignment workaround */
130 if (EFX_WORKAROUND_5391(efx))
131 max_descs += EFX_TSO_MAX_SEGS;
132
133 /* Possibly more for PCIe page boundaries within input fragments */
134 if (PAGE_SIZE > EFX_PAGE_SIZE)
135 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
136 DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
137
138 return max_descs;
139}
140
122/* 141/*
123 * Add a socket buffer to a TX queue 142 * Add a socket buffer to a TX queue
124 * 143 *
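To put numbers on the new TX queue minimum: EFX_TXQ_MIN_ENT(efx) reserves room for two worst-case TSO skbs, where efx_tx_max_skb_descs() above counts two descriptors per output segment plus one per input fragment, with optional extras for the alignment workaround and for DMA page crossings. A stand-alone worked example, using assumed values for MAX_SKB_FRAGS, GSO_MAX_SIZE and the page sizes (they are not taken from the driver or kernel headers):

#include <stdio.h>

/* Assumed values -- not taken from the driver or the kernel headers. */
#define TSO_MAX_SEGS    100u      /* matches EFX_TSO_MAX_SEGS in the hunk above */
#define MAX_SKB_FRAGS   17u
#define GSO_MAX_SIZE    65536u
#define EFX_PAGE_SIZE   4096u
#define CPU_PAGE_SIZE   4096u

static unsigned int max_skb_descs(int workaround_5391)
{
	/* Header + payload descriptor per segment, plus one per fragment boundary. */
	unsigned int max_descs = TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround. */
	if (workaround_5391)
		max_descs += TSO_MAX_SEGS;

	/* Extra descriptors for page crossings only matter when the CPU page
	 * size exceeds the device's DMA page size. */
	if (CPU_PAGE_SIZE > EFX_PAGE_SIZE) {
		unsigned int crossings =
			(GSO_MAX_SIZE + EFX_PAGE_SIZE - 1) / EFX_PAGE_SIZE;
		max_descs += crossings > MAX_SKB_FRAGS ? crossings : MAX_SKB_FRAGS;
	}

	return max_descs;
}

int main(void)
{
	unsigned int descs = max_skb_descs(0);

	printf("max descriptors per TSO skb: %u\n", descs);
	printf("TX queue minimum (2 worst-case skbs): %u\n", 2 * descs);
	return 0;
}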
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cd01ee7ecef1..b93245c11995 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
74 * the necessary resources and invokes the main to init 74 * the necessary resources and invokes the main to init
75 * the net device, register the mdio bus etc. 75 * the net device, register the mdio bus etc.
76 */ 76 */
77static int stmmac_pltfr_probe(struct platform_device *pdev) 77static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
78{ 78{
79 int ret = 0; 79 int ret = 0;
80 struct resource *res; 80 struct resource *res;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648fcf0b6..98934bdf6acf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev)
1003} 1003}
1004 1004
1005int ixp46x_phc_index = -1; 1005int ixp46x_phc_index = -1;
1006EXPORT_SYMBOL_GPL(ixp46x_phc_index);
1006 1007
1007static int ixp4xx_get_ts_info(struct net_device *dev, 1008static int ixp4xx_get_ts_info(struct net_device *dev,
1008 struct ethtool_ts_info *info) 1009 struct ethtool_ts_info *info)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee2917eb02..4a1a5f58fa73 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device)
383 unsigned long flags; 383 unsigned long flags;
384 384
385 net_device = hv_get_drvdata(device); 385 net_device = hv_get_drvdata(device);
386 spin_lock_irqsave(&device->channel->inbound_lock, flags);
387 net_device->destroy = true;
388 spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
389
390 /* Wait for all send completions */
391 wait_event(net_device->wait_drain,
392 atomic_read(&net_device->num_outstanding_sends) == 0);
393 386
394 netvsc_disconnect_vsp(net_device); 387 netvsc_disconnect_vsp(net_device);
395 388
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index e5d6146937fa..1e88a1095934 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
718{ 718{
719 struct rndis_request *request; 719 struct rndis_request *request;
720 struct rndis_halt_request *halt; 720 struct rndis_halt_request *halt;
721 struct netvsc_device *nvdev = dev->net_dev;
722 struct hv_device *hdev = nvdev->dev;
723 ulong flags;
721 724
722 /* Attempt to do a rndis device halt */ 725 /* Attempt to do a rndis device halt */
723 request = get_rndis_request(dev, RNDIS_MSG_HALT, 726 request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
735 dev->state = RNDIS_DEV_UNINITIALIZED; 738 dev->state = RNDIS_DEV_UNINITIALIZED;
736 739
737cleanup: 740cleanup:
741 spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
742 nvdev->destroy = true;
743 spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
744
745 /* Wait for all send completions */
746 wait_event(nvdev->wait_drain,
747 atomic_read(&nvdev->num_outstanding_sends) == 0);
748
738 if (request) 749 if (request)
739 put_rndis_request(dev, request); 750 put_rndis_request(dev, request);
740 return; 751 return;
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a561ae44a9ac..c6a0299aa9f9 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
158 /* If not add the 'RPOLC', we can't catch the receive interrupt. 158 /* If not add the 'RPOLC', we can't catch the receive interrupt.
159 * It's related with the HW layout and the IR transiver. 159 * It's related with the HW layout and the IR transiver.
160 */ 160 */
161 val |= IREN | RPOLC; 161 val |= UMOD_IRDA | RPOLC;
162 UART_PUT_GCTL(port, val); 162 UART_PUT_GCTL(port, val);
163 return ret; 163 return ret;
164} 164}
@@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
432 bfin_sir_stop_rx(port); 432 bfin_sir_stop_rx(port);
433 433
434 val = UART_GET_GCTL(port); 434 val = UART_GET_GCTL(port);
435 val &= ~(UCEN | IREN | RPOLC); 435 val &= ~(UCEN | UMOD_MASK | RPOLC);
436 UART_PUT_GCTL(port, val); 436 UART_PUT_GCTL(port, val);
437 437
438#ifdef CONFIG_SIR_BFIN_DMA 438#ifdef CONFIG_SIR_BFIN_DMA
@@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work)
518 * reset all the UART. 518 * reset all the UART.
519 */ 519 */
520 val = UART_GET_GCTL(port); 520 val = UART_GET_GCTL(port);
521 val &= ~(IREN | RPOLC); 521 val &= ~(UMOD_MASK | RPOLC);
522 UART_PUT_GCTL(port, val); 522 UART_PUT_GCTL(port, val);
523 SSYNC(); 523 SSYNC();
524 val |= IREN | RPOLC; 524 val |= UMOD_IRDA | RPOLC;
525 UART_PUT_GCTL(port, val); 525 UART_PUT_GCTL(port, val);
526 SSYNC(); 526 SSYNC();
527 /* bfin_sir_set_speed(port, self->speed); */ 527 /* bfin_sir_set_speed(port, self->speed); */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0737bd4d1669..0f0f9ce3a776 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
94 int i; 94 int i;
95 95
96 for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { 96 for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
97 if (rcu_dereference(vlan->taps[i]) == q) 97 if (rcu_dereference_protected(vlan->taps[i],
98 lockdep_is_held(&macvtap_lock)) == q)
98 return i; 99 return i;
99 } 100 }
100 101
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef33dee..eefe49e8713c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@ err:
101 n--; 101 n--;
102 gpio_free(s->gpio[n]); 102 gpio_free(s->gpio[n]);
103 } 103 }
104 devm_kfree(&pdev->dev, s);
105 return r; 104 return r;
106} 105}
107 106
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1c98321b56cc..162464fe86bf 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
189 if (sk_pppox(po)->sk_state & PPPOX_DEAD) 189 if (sk_pppox(po)->sk_state & PPPOX_DEAD)
190 goto tx_error; 190 goto tx_error;
191 191
192 rt = ip_route_output_ports(&init_net, &fl4, NULL, 192 rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
193 opt->dst_addr.sin_addr.s_addr, 193 opt->dst_addr.sin_addr.s_addr,
194 opt->src_addr.sin_addr.s_addr, 194 opt->src_addr.sin_addr.s_addr,
195 0, 0, IPPROTO_GRE, 195 0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
468 po->chan.private = sk; 468 po->chan.private = sk;
469 po->chan.ops = &pptp_chan_ops; 469 po->chan.ops = &pptp_chan_ops;
470 470
471 rt = ip_route_output_ports(&init_net, &fl4, sk, 471 rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
472 opt->dst_addr.sin_addr.s_addr, 472 opt->dst_addr.sin_addr.s_addr,
473 opt->src_addr.sin_addr.s_addr, 473 opt->src_addr.sin_addr.s_addr,
474 0, 0, 474 0, 0,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 926d4db5cb38..3a16d4fdaa05 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
187 netif_tx_lock_bh(tun->dev); 187 netif_tx_lock_bh(tun->dev);
188 netif_carrier_off(tun->dev); 188 netif_carrier_off(tun->dev);
189 tun->tfile = NULL; 189 tun->tfile = NULL;
190 tun->socket.file = NULL;
191 netif_tx_unlock_bh(tun->dev); 190 netif_tx_unlock_bh(tun->dev);
192 191
193 /* Drop read queue */ 192 /* Drop read queue */
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 64610048ce87..7d78669000d7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev)
232 struct urb *req = usb_alloc_urb(0, GFP_KERNEL); 232 struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
233 233
234 if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { 234 if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
235 usb_free_urb(req);
235 usbpn_close(dev); 236 usbpn_close(dev);
236 return -ENOMEM; 237 return -ENOMEM;
237 } 238 }
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f4ce5957df32..4cd582a4f625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = {
1225 .driver_info = (unsigned long) &wwan_info, 1225 .driver_info = (unsigned long) &wwan_info,
1226 }, 1226 },
1227 1227
1228 /* Dell branded MBM devices like DW5550 */
1229 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
1230 | USB_DEVICE_ID_MATCH_VENDOR,
1231 .idVendor = 0x413c,
1232 .bInterfaceClass = USB_CLASS_COMM,
1233 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
1234 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
1235 .driver_info = (unsigned long) &wwan_info,
1236 },
1237
1238 /* Toshiba branded MBM devices */
1239 { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
1240 | USB_DEVICE_ID_MATCH_VENDOR,
1241 .idVendor = 0x0930,
1242 .bInterfaceClass = USB_CLASS_COMM,
1243 .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
1244 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
1245 .driver_info = (unsigned long) &wwan_info,
1246 },
1247
1228 /* Generic CDC-NCM devices */ 1248 /* Generic CDC-NCM devices */
1229 { USB_INTERFACE_INFO(USB_CLASS_COMM, 1249 { USB_INTERFACE_INFO(USB_CLASS_COMM,
1230 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), 1250 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab7acf8..60b6a9daff7e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah)
730 case AR9300_DEVID_QCA955X: 730 case AR9300_DEVID_QCA955X:
731 case AR9300_DEVID_AR9580: 731 case AR9300_DEVID_AR9580:
732 case AR9300_DEVID_AR9462: 732 case AR9300_DEVID_AR9462:
733 case AR9485_DEVID_AR1111:
733 break; 734 break;
734 default: 735 default:
735 if (common->bus_ops->ath_bus_type == ATH_USB) 736 if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146d81dc..ce7332c64efb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
49#define AR9300_DEVID_AR9462 0x0034 49#define AR9300_DEVID_AR9462 0x0034
50#define AR9300_DEVID_AR9330 0x0035 50#define AR9300_DEVID_AR9330 0x0035
51#define AR9300_DEVID_QCA955X 0x0038 51#define AR9300_DEVID_QCA955X 0x0038
52#define AR9485_DEVID_AR1111 0x0037
52 53
53#define AR5416_AR9100_DEVID 0x000b 54#define AR5416_AR9100_DEVID 0x000b
54 55
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d55e637..d455de9162ec 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
37 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ 37 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ 38 { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ 39 { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
40 { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
40 { 0 } 41 { 0 }
41}; 42};
42 43
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b308d5..a140165dfee0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev)
2719 if (dev->dev->chip_id == 0x4301) { 2719 if (dev->dev->chip_id == 0x4301) {
2720 mask |= 0x0060; 2720 mask |= 0x0060;
2721 set |= 0x0060; 2721 set |= 0x0060;
2722 } else if (dev->dev->chip_id == 0x5354) {
2723 /* Don't allow overtaking buttons GPIOs */
2724 set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
2722 } 2725 }
2723 if (dev->dev->chip_id == 0x5354) 2726
2724 set &= 0xff02;
2725 if (0 /* FIXME: conditional unknown */ ) { 2727 if (0 /* FIXME: conditional unknown */ ) {
2726 b43_write16(dev, B43_MMIO_GPIO_MASK, 2728 b43_write16(dev, B43_MMIO_GPIO_MASK,
2727 b43_read16(dev, B43_MMIO_GPIO_MASK) 2729 b43_read16(dev, B43_MMIO_GPIO_MASK)
2728 | 0x0100); 2730 | 0x0100);
2729 mask |= 0x0180; 2731 /* BT Coexistance Input */
2730 set |= 0x0180; 2732 mask |= 0x0080;
2733 set |= 0x0080;
2734 /* BT Coexistance Out */
2735 mask |= 0x0100;
2736 set |= 0x0100;
2731 } 2737 }
2732 if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { 2738 if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
2739 /* PA is controlled by gpio 9, let ucode handle it */
2733 b43_write16(dev, B43_MMIO_GPIO_MASK, 2740 b43_write16(dev, B43_MMIO_GPIO_MASK,
2734 b43_read16(dev, B43_MMIO_GPIO_MASK) 2741 b43_read16(dev, B43_MMIO_GPIO_MASK)
2735 | 0x0200); 2742 | 0x0200);
2736 mask |= 0x0200; 2743 mask |= 0x0200;
2737 set |= 0x0200; 2744 set |= 0x0200;
2738 } 2745 }
2739 if (dev->dev->core_rev >= 2)
2740 mask |= 0x0010; /* FIXME: This is redundant. */
2741 2746
2742 switch (dev->dev->bus_type) { 2747 switch (dev->dev->bus_type) {
2743#ifdef CONFIG_B43_BCMA 2748#ifdef CONFIG_B43_BCMA
2744 case B43_BUS_BCMA: 2749 case B43_BUS_BCMA:
2745 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, 2750 bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
2746 (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, 2751 (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
2747 BCMA_CC_GPIOCTL) & mask) | set); 2752 BCMA_CC_GPIOCTL) & ~mask) | set);
2748 break; 2753 break;
2749#endif 2754#endif
2750#ifdef CONFIG_B43_SSB 2755#ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
2753 if (gpiodev) 2758 if (gpiodev)
2754 ssb_write32(gpiodev, B43_GPIO_CONTROL, 2759 ssb_write32(gpiodev, B43_GPIO_CONTROL,
2755 (ssb_read32(gpiodev, B43_GPIO_CONTROL) 2760 (ssb_read32(gpiodev, B43_GPIO_CONTROL)
2756 & mask) | set); 2761 & ~mask) | set);
2757 break; 2762 break;
2758#endif 2763#endif
2759 } 2764 }
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f927cb..7ed7d7577024 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
382{ 382{
383 struct brcms_c_info *wlc = wlc_cm->wlc; 383 struct brcms_c_info *wlc = wlc_cm->wlc;
384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 384 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
385 const struct ieee80211_reg_rule *reg_rule;
386 struct txpwr_limits txpwr; 385 struct txpwr_limits txpwr;
387 int ret;
388 386
389 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); 387 brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
390 388
@@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
393 ); 391 );
394 392
395 /* set or restore gmode as required by regulatory */ 393 /* set or restore gmode as required by regulatory */
396 ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule); 394 if (ch->flags & IEEE80211_CHAN_NO_OFDM)
397 if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
398 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); 395 brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
399 else 396 else
400 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); 397 brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47e077f..192ad5c1fcc8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
121 IEEE80211_CHAN_NO_HT40PLUS), 121 IEEE80211_CHAN_NO_HT40PLUS),
122 CHAN2GHZ(14, 2484, 122 CHAN2GHZ(14, 2484,
123 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | 123 IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
124 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) 124 IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
125 IEEE80211_CHAN_NO_OFDM)
125}; 126};
126 127
127static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { 128static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 6fddd2785e6e..a82f46c10f5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
707 */ 707 */
708static bool rs_use_green(struct ieee80211_sta *sta) 708static bool rs_use_green(struct ieee80211_sta *sta)
709{ 709{
710 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 710 /*
711 struct iwl_rxon_context *ctx = sta_priv->ctx; 711 * There's a bug somewhere in this code that causes the
712 712 * scaling to get stuck because GF+SGI can't be combined
713 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && 713 * in SISO rates. Until we find that bug, disable GF, it
714 !(ctx->ht.non_gf_sta_present); 714 * has only limited benefit and we still interoperate with
715 * GF APs since we can always receive GF transmissions.
716 */
717 return false;
715} 718}
716 719
717/** 720/**
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de800ed90..1c10b542ab23 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv,
1254 netif_tx_wake_all_queues(priv->dev); 1254 netif_tx_wake_all_queues(priv->dev);
1255 } 1255 }
1256 1256
1257 kfree(cmd);
1257done: 1258done:
1258 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 1259 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
1259 return ret; 1260 return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caebaa4397..e970897f6ab5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func)
1314 kfree(packet); 1314 kfree(packet);
1315 } 1315 }
1316 1316
1317 kfree(card);
1317 lbs_deb_leave(LBS_DEB_SDIO); 1318 lbs_deb_leave(LBS_DEB_SDIO);
1318} 1319}
1319 1320
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 58048189bd24..fe1ea43c5149 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@ static int lbs_thread(void *data)
571 netdev_info(dev, "Timeout submitting command 0x%04x\n", 571 netdev_info(dev, "Timeout submitting command 0x%04x\n",
572 le16_to_cpu(cmdnode->cmdbuf->command)); 572 le16_to_cpu(cmdnode->cmdbuf->command));
573 lbs_complete_command(priv, cmdnode, -ETIMEDOUT); 573 lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
574 if (priv->reset_card) 574
575 /* Reset card, but only when it isn't in the process
576 * of being shutdown anyway. */
577 if (!dev->dismantle && priv->reset_card)
575 priv->reset_card(priv); 578 priv->reset_card(priv);
576 } 579 }
577 priv->cmd_timed_out = 0; 580 priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1b9fe0..cb8c2aca54e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
221 mutex_unlock(&rt2x00dev->csr_mutex); 221 mutex_unlock(&rt2x00dev->csr_mutex);
222} 222}
223 223
224static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
225{
226 u32 reg;
227 int i, count;
228
229 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
230 if (rt2x00_get_field32(reg, WLAN_EN))
231 return 0;
232
233 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
234 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
235 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
236 rt2x00_set_field32(&reg, WLAN_EN, 1);
237 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
238
239 udelay(REGISTER_BUSY_DELAY);
240
241 count = 0;
242 do {
243 /*
244 * Check PLL_LD & XTAL_RDY.
245 */
246 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
247 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
248 if (rt2x00_get_field32(reg, PLL_LD) &&
249 rt2x00_get_field32(reg, XTAL_RDY))
250 break;
251 udelay(REGISTER_BUSY_DELAY);
252 }
253
254 if (i >= REGISTER_BUSY_COUNT) {
255
256 if (count >= 10)
257 return -EIO;
258
259 rt2800_register_write(rt2x00dev, 0x58, 0x018);
260 udelay(REGISTER_BUSY_DELAY);
261 rt2800_register_write(rt2x00dev, 0x58, 0x418);
262 udelay(REGISTER_BUSY_DELAY);
263 rt2800_register_write(rt2x00dev, 0x58, 0x618);
264 udelay(REGISTER_BUSY_DELAY);
265 count++;
266 } else {
267 count = 0;
268 }
269
270 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
271 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
272 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
273 rt2x00_set_field32(&reg, WLAN_RESET, 1);
274 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
275 udelay(10);
276 rt2x00_set_field32(&reg, WLAN_RESET, 0);
277 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
278 udelay(10);
279 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
280 } while (count != 0);
281
282 return 0;
283}
284
224void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, 285void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
225 const u8 command, const u8 token, 286 const u8 command, const u8 token,
226 const u8 arg0, const u8 arg1) 287 const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
400{ 461{
401 unsigned int i; 462 unsigned int i;
402 u32 reg; 463 u32 reg;
464 int retval;
465
466 if (rt2x00_rt(rt2x00dev, RT3290)) {
467 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
468 if (retval)
469 return -EBUSY;
470 }
403 471
404 /* 472 /*
405 * If driver doesn't wake up firmware here, 473 * If driver doesn't wake up firmware here,
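rt2800_enable_wlan_rt3290(), added above (and removed from rt2800pci.c in the next hunk), is essentially a bounded two-level wait: poll PLL_LD/XTAL_RDY up to REGISTER_BUSY_COUNT times, and if the chip never reports ready, rewrite register 0x58 with a sequence of values and retry, giving up with -EIO after ten attempts. A condensed standalone sketch of that control flow; hw_ready(), hw_kick(), POLL_ATTEMPTS and MAX_RETRIES are stand-ins rather than the rt2x00 API, and the real function additionally performs a WLAN reset once the poll succeeds:

#include <stdbool.h>
#include <stdio.h>

#define POLL_ATTEMPTS	100	/* stand-in for REGISTER_BUSY_COUNT */
#define MAX_RETRIES	10

static int poll_countdown = 250;	/* pretend the chip needs 250 polls */

static bool hw_ready(void)
{
	return --poll_countdown <= 0;
}

static void hw_kick(void)
{
	printf("nudging the hardware, trying again\n");
}

static int wait_for_ready(void)
{
	int retries = 0;

	for (;;) {
		int i;

		for (i = 0; i < POLL_ATTEMPTS; i++) {
			if (hw_ready())
				return 0;	/* ready, like count == 0 exit */
		}

		if (++retries >= MAX_RETRIES)
			return -1;		/* give up, like -EIO */

		hw_kick();
	}
}

int main(void)
{
	printf("wait_for_ready() = %d\n", wait_for_ready());
	return 0;
}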
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e9cb04..98aa426a3564 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
980 return rt2800_validate_eeprom(rt2x00dev); 980 return rt2800_validate_eeprom(rt2x00dev);
981} 981}
982 982
983static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
984{
985 u32 reg;
986 int i, count;
987
988 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
989 if (rt2x00_get_field32(reg, WLAN_EN))
990 return 0;
991
992 rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
993 rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
994 rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
995 rt2x00_set_field32(&reg, WLAN_EN, 1);
996 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
997
998 udelay(REGISTER_BUSY_DELAY);
999
1000 count = 0;
1001 do {
1002 /*
1003 * Check PLL_LD & XTAL_RDY.
1004 */
1005 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1006 rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
1007 if (rt2x00_get_field32(reg, PLL_LD) &&
1008 rt2x00_get_field32(reg, XTAL_RDY))
1009 break;
1010 udelay(REGISTER_BUSY_DELAY);
1011 }
1012
1013 if (i >= REGISTER_BUSY_COUNT) {
1014
1015 if (count >= 10)
1016 return -EIO;
1017
1018 rt2800_register_write(rt2x00dev, 0x58, 0x018);
1019 udelay(REGISTER_BUSY_DELAY);
1020 rt2800_register_write(rt2x00dev, 0x58, 0x418);
1021 udelay(REGISTER_BUSY_DELAY);
1022 rt2800_register_write(rt2x00dev, 0x58, 0x618);
1023 udelay(REGISTER_BUSY_DELAY);
1024 count++;
1025 } else {
1026 count = 0;
1027 }
1028
1029 rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
1030 rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
1031 rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
1032 rt2x00_set_field32(&reg, WLAN_RESET, 1);
1033 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1034 udelay(10);
1035 rt2x00_set_field32(&reg, WLAN_RESET, 0);
1036 rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
1037 udelay(10);
1038 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
1039 } while (count != 0);
1040
1041 return 0;
1042}
1043static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) 983static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1044{ 984{
1045 int retval; 985 int retval;
@@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1063 return retval; 1003 return retval;
1064 1004
1065 /* 1005 /*
1066 * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
1067 * clk for rt3290. That avoid the MCU fail in start phase.
1068 */
1069 if (rt2x00_rt(rt2x00dev, RT3290)) {
1070 retval = rt2800_enable_wlan_rt3290(rt2x00dev);
1071
1072 if (retval)
1073 return retval;
1074 }
1075
1076 /*
1077 * This device has multiple filters for control frames 1006 * This device has multiple filters for control frames
1078 * and has a separate filter for PS Poll frames. 1007 * and has a separate filter for PS Poll frames.
1079 */ 1008 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f32259686b45..3f7bc5cadf9a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2243 2243
2244static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) 2244static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2245{ 2245{
2246 struct ieee80211_conf conf = { .flags = 0 }; 2246 struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
2247 struct rt2x00lib_conf libconf = { .conf = &conf };
2248 2247
2249 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); 2248 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2250} 2249}
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff94296..3674d877ed7c 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
292{ 292{
293 return platform_driver_register(&imx23_pinctrl_driver); 293 return platform_driver_register(&imx23_pinctrl_driver);
294} 294}
295arch_initcall(imx23_pinctrl_init); 295postcore_initcall(imx23_pinctrl_init);
296 296
297static void __exit imx23_pinctrl_exit(void) 297static void __exit imx23_pinctrl_exit(void)
298{ 298{
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026811a2..0f5b2122b1ba 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
408{ 408{
409 return platform_driver_register(&imx28_pinctrl_driver); 409 return platform_driver_register(&imx28_pinctrl_driver);
410} 410}
411arch_initcall(imx28_pinctrl_init); 411postcore_initcall(imx28_pinctrl_init);
412 412
413static void __exit imx28_pinctrl_exit(void) 413static void __exit imx28_pinctrl_exit(void)
414{ 414{
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769c6733..5f3e9d0221e1 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
766DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); 766DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
767DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); 767DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
768DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); 768DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
769DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2"); 769DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
770DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); 770DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
771DB8500_FUNC_GROUPS(usb, "usb_a_1"); 771DB8500_FUNC_GROUPS(usb, "usb_a_1");
772DB8500_FUNC_GROUPS(trig, "trig_b_1"); 772DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49a7a1c..ec6ac501b23a 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
1731 for (i = 0; i < npct->soc->gpio_num_ranges; i++) { 1731 for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
1732 if (!nmk_gpio_chips[i]) { 1732 if (!nmk_gpio_chips[i]) {
1733 dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); 1733 dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
1734 devm_kfree(&pdev->dev, npct);
1735 return -EPROBE_DEFER; 1734 return -EPROBE_DEFER;
1736 } 1735 }
1737 npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; 1736 npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8978e9..7fca6ce5952b 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@ out_no_rsc_remap:
1217 iounmap(spmx->gpio_virtbase); 1217 iounmap(spmx->gpio_virtbase);
1218out_no_gpio_remap: 1218out_no_gpio_remap:
1219 platform_set_drvdata(pdev, NULL); 1219 platform_set_drvdata(pdev, NULL);
1220 devm_kfree(&pdev->dev, spmx);
1221 return ret; 1220 return ret;
1222} 1221}
1223 1222
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c112d91..309f5b9a70ec 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
1121 upmx->dev = &pdev->dev; 1121 upmx->dev = &pdev->dev;
1122 1122
1123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1123 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1124 if (!res) { 1124 if (!res)
1125 ret = -ENOENT; 1125 return -ENOENT;
1126 goto out_no_resource;
1127 }
1128 upmx->phybase = res->start; 1126 upmx->phybase = res->start;
1129 upmx->physize = resource_size(res); 1127 upmx->physize = resource_size(res);
1130 1128
@@ -1165,8 +1163,6 @@ out_no_remap:
1165 platform_set_drvdata(pdev, NULL); 1163 platform_set_drvdata(pdev, NULL);
1166out_no_memregion: 1164out_no_memregion:
1167 release_mem_region(upmx->phybase, upmx->physize); 1165 release_mem_region(upmx->phybase, upmx->physize);
1168out_no_resource:
1169 devm_kfree(&pdev->dev, upmx);
1170 return ret; 1166 return ret;
1171} 1167}
1172 1168
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 2ca7dd1ab3e4..cd33add118ce 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev)
350 inputdev->close = cmpc_accel_close_v4; 350 inputdev->close = cmpc_accel_close_v4;
351} 351}
352 352
353#ifdef CONFIG_PM_SLEEP
353static int cmpc_accel_suspend_v4(struct device *dev) 354static int cmpc_accel_suspend_v4(struct device *dev)
354{ 355{
355 struct input_dev *inputdev; 356 struct input_dev *inputdev;
@@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev)
384 385
385 return 0; 386 return 0;
386} 387}
388#endif
387 389
388static int cmpc_accel_add_v4(struct acpi_device *acpi) 390static int cmpc_accel_add_v4(struct acpi_device *acpi)
389{ 391{
@@ -752,6 +754,7 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
752 return cmpc_remove_acpi_notify_device(acpi); 754 return cmpc_remove_acpi_notify_device(acpi);
753} 755}
754 756
757#ifdef CONFIG_PM_SLEEP
755static int cmpc_tablet_resume(struct device *dev) 758static int cmpc_tablet_resume(struct device *dev)
756{ 759{
757 struct input_dev *inputdev = dev_get_drvdata(dev); 760 struct input_dev *inputdev = dev_get_drvdata(dev);
@@ -761,6 +764,7 @@ static int cmpc_tablet_resume(struct device *dev)
761 input_report_switch(inputdev, SW_TABLET_MODE, !val); 764 input_report_switch(inputdev, SW_TABLET_MODE, !val);
762 return 0; 765 return 0;
763} 766}
767#endif
764 768
765static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); 769static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
766 770
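This hunk, and the platform/x86 hunks that follow, all wrap the suspend/resume callbacks in #ifdef CONFIG_PM_SLEEP: when sleep support is compiled out, SIMPLE_DEV_PM_OPS() no longer references the handlers, so unguarded static functions would trigger defined-but-unused warnings. A standalone analogue of the pattern; FEATURE_SLEEP, demo_resume and struct dev_pm_callbacks are illustrative stand-ins for CONFIG_PM_SLEEP, the driver callback and struct dev_pm_ops:

#include <stdio.h>

/* #define FEATURE_SLEEP 1 */	/* stand-in for CONFIG_PM_SLEEP */

#ifdef FEATURE_SLEEP
static int demo_resume(void)
{
	printf("restoring device state\n");
	return 0;
}
#endif

struct dev_pm_callbacks {
	int (*resume)(void);
};

static const struct dev_pm_callbacks demo_pm = {
#ifdef FEATURE_SLEEP
	.resume = demo_resume,		/* only referenced when sleep is built */
#else
	.resume = NULL,
#endif
};

int main(void)
{
	if (demo_pm.resume)
		return demo_pm.resume();
	printf("sleep support compiled out\n");
	return 0;
}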
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index d2e41735a47b..7acae3f85f3b 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
440 return 0; 440 return 0;
441} 441}
442 442
443#ifdef CONFIG_PM_SLEEP
443static int acpi_fujitsu_resume(struct device *dev) 444static int acpi_fujitsu_resume(struct device *dev)
444{ 445{
445 fujitsu_reset(); 446 fujitsu_reset();
446 return 0; 447 return 0;
447} 448}
449#endif
448 450
449static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); 451static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
450 452
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index d9ab6f64dcec..777c7e3dda51 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev)
305 return 0; 305 return 0;
306} 306}
307 307
308#ifdef CONFIG_PM_SLEEP
308static int hdaps_resume(struct device *dev) 309static int hdaps_resume(struct device *dev)
309{ 310{
310 return hdaps_device_init(); 311 return hdaps_device_init();
311} 312}
313#endif
312 314
313static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); 315static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
314 316
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index f4d91154ad67..6b9af989632b 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
352} 352}
353 353
354 354
355#ifdef CONFIG_PM 355#ifdef CONFIG_PM_SLEEP
356static int lis3lv02d_suspend(struct device *dev) 356static int lis3lv02d_suspend(struct device *dev)
357{ 357{
358 /* make sure the device is off when we suspend */ 358 /* make sure the device is off when we suspend */
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f64441844317..2111dbb7e1e3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,9 @@
85#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 85#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
86#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) 86#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
87 87
88#ifdef CONFIG_PM_SLEEP
88static int msi_laptop_resume(struct device *device); 89static int msi_laptop_resume(struct device *device);
90#endif
89static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); 91static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
90 92
91#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f 93#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -753,6 +755,7 @@ err_bluetooth:
753 return retval; 755 return retval;
754} 756}
755 757
758#ifdef CONFIG_PM_SLEEP
756static int msi_laptop_resume(struct device *device) 759static int msi_laptop_resume(struct device *device)
757{ 760{
758 u8 data; 761 u8 data;
@@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device)
773 776
774 return 0; 777 return 0;
775} 778}
779#endif
776 780
777static int __init msi_laptop_input_setup(void) 781static int __init msi_laptop_input_setup(void)
778{ 782{
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 24480074bcf0..8e8caa767d6a 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
188}; 188};
189MODULE_DEVICE_TABLE(acpi, pcc_device_ids); 189MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
190 190
191#ifdef CONFIG_PM_SLEEP
191static int acpi_pcc_hotkey_resume(struct device *dev); 192static int acpi_pcc_hotkey_resume(struct device *dev);
193#endif
192static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); 194static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
193 195
194static struct acpi_driver acpi_pcc_driver = { 196static struct acpi_driver acpi_pcc_driver = {
@@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
540 542
541/* kernel module interface */ 543/* kernel module interface */
542 544
545#ifdef CONFIG_PM_SLEEP
543static int acpi_pcc_hotkey_resume(struct device *dev) 546static int acpi_pcc_hotkey_resume(struct device *dev)
544{ 547{
545 struct pcc_acpi *pcc; 548 struct pcc_acpi *pcc;
@@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
556 559
557 return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); 560 return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
558} 561}
562#endif
559 563
560static int acpi_pcc_hotkey_add(struct acpi_device *device) 564static int acpi_pcc_hotkey_add(struct acpi_device *device)
561{ 565{
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9363969ad07a..daaddec68def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
140 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " 140 "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
141 "(default: 0)"); 141 "(default: 0)");
142 142
143#ifdef CONFIG_PM_SLEEP
143static void sony_nc_kbd_backlight_resume(void); 144static void sony_nc_kbd_backlight_resume(void);
145static void sony_nc_thermal_resume(void);
146#endif
144static int sony_nc_kbd_backlight_setup(struct platform_device *pd, 147static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
145 unsigned int handle); 148 unsigned int handle);
146static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); 149static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
@@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
151 154
152static int sony_nc_thermal_setup(struct platform_device *pd); 155static int sony_nc_thermal_setup(struct platform_device *pd);
153static void sony_nc_thermal_cleanup(struct platform_device *pd); 156static void sony_nc_thermal_cleanup(struct platform_device *pd);
154static void sony_nc_thermal_resume(void);
155 157
156static int sony_nc_lid_resume_setup(struct platform_device *pd); 158static int sony_nc_lid_resume_setup(struct platform_device *pd);
157static void sony_nc_lid_resume_cleanup(struct platform_device *pd); 159static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
@@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
1431 sony_nc_handles_cleanup(pd); 1433 sony_nc_handles_cleanup(pd);
1432} 1434}
1433 1435
1436#ifdef CONFIG_PM_SLEEP
1434static void sony_nc_function_resume(void) 1437static void sony_nc_function_resume(void)
1435{ 1438{
1436 unsigned int i, result, bitmask, arg; 1439 unsigned int i, result, bitmask, arg;
@@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev)
1508 1511
1509 return 0; 1512 return 0;
1510} 1513}
1514#endif
1511 1515
1512static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); 1516static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
1513 1517
@@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1872 } 1876 }
1873} 1877}
1874 1878
1879#ifdef CONFIG_PM_SLEEP
1875static void sony_nc_kbd_backlight_resume(void) 1880static void sony_nc_kbd_backlight_resume(void)
1876{ 1881{
1877 int ignore = 0; 1882 int ignore = 0;
@@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void)
1888 (kbdbl_ctl->base + 0x200) | 1893 (kbdbl_ctl->base + 0x200) |
1889 (kbdbl_ctl->timeout << 0x10), &ignore); 1894 (kbdbl_ctl->timeout << 0x10), &ignore);
1890} 1895}
1896#endif
1891 1897
1892struct battery_care_control { 1898struct battery_care_control {
1893 struct device_attribute attrs[2]; 1899 struct device_attribute attrs[2];
@@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
2210 } 2216 }
2211} 2217}
2212 2218
2219#ifdef CONFIG_PM_SLEEP
2213static void sony_nc_thermal_resume(void) 2220static void sony_nc_thermal_resume(void)
2214{ 2221{
2215 unsigned int status = sony_nc_thermal_mode_get(); 2222 unsigned int status = sony_nc_thermal_mode_get();
@@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void)
2217 if (status != th_handle->mode) 2224 if (status != th_handle->mode)
2218 sony_nc_thermal_mode_set(th_handle->mode); 2225 sony_nc_thermal_mode_set(th_handle->mode);
2219} 2226}
2227#endif
2220 2228
2221/* resume on LID open */ 2229/* resume on LID open */
2222struct snc_lid_resume_control { 2230struct snc_lid_resume_control {
@@ -4287,6 +4295,7 @@ err_free_resources:
4287 return result; 4295 return result;
4288} 4296}
4289 4297
4298#ifdef CONFIG_PM_SLEEP
4290static int sony_pic_suspend(struct device *dev) 4299static int sony_pic_suspend(struct device *dev)
4291{ 4300{
4292 if (sony_pic_disable(to_acpi_device(dev))) 4301 if (sony_pic_disable(to_acpi_device(dev)))
@@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev)
4300 spic_dev.cur_ioport, spic_dev.cur_irq); 4309 spic_dev.cur_ioport, spic_dev.cur_irq);
4301 return 0; 4310 return 0;
4302} 4311}
4312#endif
4303 4313
4304static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); 4314static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
4305 4315
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e7f73287636c..f28f36ccdcf4 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev;
922static struct mutex tpacpi_inputdev_send_mutex; 922static struct mutex tpacpi_inputdev_send_mutex;
923static LIST_HEAD(tpacpi_all_drivers); 923static LIST_HEAD(tpacpi_all_drivers);
924 924
925#ifdef CONFIG_PM_SLEEP
925static int tpacpi_suspend_handler(struct device *dev) 926static int tpacpi_suspend_handler(struct device *dev)
926{ 927{
927 struct ibm_struct *ibm, *itmp; 928 struct ibm_struct *ibm, *itmp;
@@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev)
949 950
950 return 0; 951 return 0;
951} 952}
953#endif
952 954
953static SIMPLE_DEV_PM_OPS(tpacpi_pm, 955static SIMPLE_DEV_PM_OPS(tpacpi_pm,
954 tpacpi_suspend_handler, tpacpi_resume_handler); 956 tpacpi_suspend_handler, tpacpi_resume_handler);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c13ba5bac93f..5f1256d5e933 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
1296 } 1296 }
1297} 1297}
1298 1298
1299#ifdef CONFIG_PM_SLEEP
1299static int toshiba_acpi_suspend(struct device *device) 1300static int toshiba_acpi_suspend(struct device *device)
1300{ 1301{
1301 struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); 1302 struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
@@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device)
1317 1318
1318 return 0; 1319 return 0;
1319} 1320}
1321#endif
1320 1322
1321static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, 1323static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
1322 toshiba_acpi_suspend, toshiba_acpi_resume); 1324 toshiba_acpi_suspend, toshiba_acpi_resume);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 715a43cb5e3c..5e5d6317d690 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
41}; 41};
42MODULE_DEVICE_TABLE(acpi, bt_device_ids); 42MODULE_DEVICE_TABLE(acpi, bt_device_ids);
43 43
44#ifdef CONFIG_PM_SLEEP
44static int toshiba_bt_resume(struct device *dev); 45static int toshiba_bt_resume(struct device *dev);
46#endif
45static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); 47static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
46 48
47static struct acpi_driver toshiba_bt_rfkill_driver = { 49static struct acpi_driver toshiba_bt_rfkill_driver = {
@@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
90 toshiba_bluetooth_enable(device->handle); 92 toshiba_bluetooth_enable(device->handle);
91} 93}
92 94
95#ifdef CONFIG_PM_SLEEP
93static int toshiba_bt_resume(struct device *dev) 96static int toshiba_bt_resume(struct device *dev)
94{ 97{
95 return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); 98 return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
96} 99}
100#endif
97 101
98static int toshiba_bt_rfkill_add(struct acpi_device *device) 102static int toshiba_bt_rfkill_add(struct acpi_device *device)
99{ 103{
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 849c07c13bf6..38ba39d7ca7d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
77 } 77 }
78} 78}
79 79
80#ifdef CONFIG_PM_SLEEP
80static int ebook_switch_resume(struct device *dev) 81static int ebook_switch_resume(struct device *dev)
81{ 82{
82 return ebook_send_state(to_acpi_device(dev)); 83 return ebook_send_state(to_acpi_device(dev));
83} 84}
85#endif
84 86
85static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); 87static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
86 88
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index eb415bd76494..9592b936b71b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
582void rtc_update_irq(struct rtc_device *rtc, 582void rtc_update_irq(struct rtc_device *rtc,
583 unsigned long num, unsigned long events) 583 unsigned long num, unsigned long events)
584{ 584{
585 pm_stay_awake(rtc->dev.parent);
585 schedule_work(&rtc->irqwork); 586 schedule_work(&rtc->irqwork);
586} 587}
587EXPORT_SYMBOL_GPL(rtc_update_irq); 588EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work)
844 845
845 mutex_lock(&rtc->ops_lock); 846 mutex_lock(&rtc->ops_lock);
846again: 847again:
848 pm_relax(rtc->dev.parent);
847 __rtc_read_time(rtc, &tm); 849 __rtc_read_time(rtc, &tm);
848 now = rtc_tm_to_ktime(tm); 850 now = rtc_tm_to_ktime(tm);
849 while ((next = timerqueue_getnext(&rtc->timerqueue))) { 851 while ((next = timerqueue_getnext(&rtc->timerqueue))) {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 132333d75408..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
568 hpet_mask_rtc_irq_bit(RTC_AIE); 568 hpet_mask_rtc_irq_bit(RTC_AIE);
569 569
570 CMOS_READ(RTC_INTR_FLAGS); 570 CMOS_READ(RTC_INTR_FLAGS);
571 pm_wakeup_event(cmos_rtc.dev, 0);
572 } 571 }
573 spin_unlock(&rtc_lock); 572 spin_unlock(&rtc_lock);
574 573
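The rtc hunks above move wakeup handling into the core: rtc_update_irq() now calls pm_stay_awake() on the parent device before scheduling the IRQ work, rtc_timer_do_work() drops it again with pm_relax(), and the cmos driver drops its own pm_wakeup_event() call. A standalone sketch of the stay-awake/relax pairing; wakeup_active, stay_awake(), relax() and the two handlers are illustrative stand-ins for the PM wakeup-source API, not the kernel functions:

#include <stdbool.h>
#include <stdio.h>

static bool wakeup_active;	/* stand-in for the device's wakeup source */

static void stay_awake(void) { wakeup_active = true; }
static void relax(void)      { wakeup_active = false; }

/* Hypothetical deferred work: runs some time after the interrupt fired. */
static void irq_work(void)
{
	relax();		/* suspend may proceed again */
	printf("processed RTC event, wakeup_active=%d\n", wakeup_active);
}

/* Hypothetical interrupt path: keep the system awake until the work runs. */
static void rtc_interrupt(void)
{
	stay_awake();
	printf("IRQ queued work, wakeup_active=%d\n", wakeup_active);
}

int main(void)
{
	rtc_interrupt();	/* would be rtc_update_irq() in the driver */
	irq_work();		/* would be rtc_timer_do_work() */
	return 0;
}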
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a6f76bf6e3d..b1032931a1c4 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
242 switch (sdias_evbuf.event_status) { 242 switch (sdias_evbuf.event_status) {
243 case EVSTATE_ALL_STORED: 243 case EVSTATE_ALL_STORED:
244 TRACE("all stored\n"); 244 TRACE("all stored\n");
245 break;
245 case EVSTATE_PART_STORED: 246 case EVSTATE_PART_STORED:
246 TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); 247 TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
247 break; 248 break;
248 case EVSTATE_NO_DATA: 249 case EVSTATE_NO_DATA:
249 TRACE("no data\n"); 250 TRACE("no data\n");
251 /* fall through */
250 default: 252 default:
251 pr_err("Error from SCLP while copying hsa. " 253 pr_err("Error from SCLP while copying hsa. "
252 "Event status = %x\n", 254 "Event status = %x\n",
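The sclp_sdias hunk adds the missing break after EVSTATE_ALL_STORED and marks the drop from EVSTATE_NO_DATA into default as intentional. A standalone sketch of the corrected switch shape, using hypothetical status names rather than the SCLP constants:

#include <stdio.h>

enum ev_status { ALL_STORED, PART_STORED, NO_DATA, BROKEN };

static int handle_status(enum ev_status status)
{
	switch (status) {
	case ALL_STORED:
		printf("all stored\n");
		break;		/* without this we would fall into PART_STORED */
	case PART_STORED:
		printf("part stored\n");
		break;
	case NO_DATA:
		printf("no data\n");
		/* fall through: no data is still reported as an error */
	default:
		fprintf(stderr, "copy failed, status=%d\n", status);
		return -1;
	}
	return 0;
}

int main(void)
{
	handle_status(ALL_STORED);
	return handle_status(NO_DATA);
}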
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 2374468615ed..32c26d795ed0 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -324,8 +324,16 @@ int __init register_intc_controller(struct intc_desc *desc)
324 324
325 res = irq_create_identity_mapping(d->domain, irq); 325 res = irq_create_identity_mapping(d->domain, irq);
326 if (unlikely(res)) { 326 if (unlikely(res)) {
327 pr_err("can't get irq_desc for %d\n", irq); 327 if (res == -EEXIST) {
328 continue; 328 res = irq_domain_associate(d->domain, irq, irq);
329 if (unlikely(res)) {
330 pr_err("domain association failure\n");
331 continue;
332 }
333 } else {
334 pr_err("can't identity map IRQ %d\n", irq);
335 continue;
336 }
329 } 337 }
330 338
331 intc_irq_xlate_set(irq, vect->enum_id, d); 339 intc_irq_xlate_set(irq, vect->enum_id, d);
@@ -345,8 +353,19 @@ int __init register_intc_controller(struct intc_desc *desc)
345 */ 353 */
346 res = irq_create_identity_mapping(d->domain, irq2); 354 res = irq_create_identity_mapping(d->domain, irq2);
347 if (unlikely(res)) { 355 if (unlikely(res)) {
348 pr_err("can't get irq_desc for %d\n", irq2); 356 if (res == -EEXIST) {
349 continue; 357 res = irq_domain_associate(d->domain,
358 irq, irq);
359 if (unlikely(res)) {
360 pr_err("domain association "
361 "failure\n");
362 continue;
363 }
364 } else {
365 pr_err("can't identity map IRQ %d\n",
366 irq);
367 continue;
368 }
350 } 369 }
351 370
352 vect2->enum_id = 0; 371 vect2->enum_id = 0;
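The register_intc_controller() hunks above treat -EEXIST from irq_create_identity_mapping() as "already mapped" and fall back to irq_domain_associate() instead of skipping the vector. A standalone sketch of that create-or-associate fallback, with a boolean array standing in for the IRQ domain and hypothetical helper names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_IRQS 8
static bool mapped[NR_IRQS];	/* stand-in for an IRQ domain's mappings */

/* Hypothetical analogue of irq_create_identity_mapping(). */
static int create_identity_mapping(int irq)
{
	if (mapped[irq])
		return -EEXIST;
	mapped[irq] = true;
	return 0;
}

/* Hypothetical analogue of irq_domain_associate(): reuse the existing slot. */
static int associate(int irq)
{
	return mapped[irq] ? 0 : -ENOENT;
}

static int map_irq(int irq)
{
	int res = create_identity_mapping(irq);

	if (res == -EEXIST)		/* already mapped: associate instead */
		res = associate(irq);
	if (res)
		fprintf(stderr, "can't map IRQ %d: %s\n", irq, strerror(-res));
	return res;
}

int main(void)
{
	mapped[3] = true;		/* pretend IRQ 3 was set up earlier */
	printf("map_irq(2) = %d\n", map_irq(2));
	printf("map_irq(3) = %d\n", map_irq(3));
	return 0;
}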
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebacf8227..89dcf155d57e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
450 writel(FLAG_CF, &ehci_regs->configured_flag); 450 writel(FLAG_CF, &ehci_regs->configured_flag);
451 451
452 /* Wait until the controller is no longer halted */ 452 /* Wait until the controller is no longer halted */
453 loop = 10; 453 loop = 1000;
454 do { 454 do {
455 status = readl(&ehci_regs->status); 455 status = readl(&ehci_regs->status);
456 if (!(status & STS_HALT)) 456 if (!(status & STS_HALT))
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index e4e2fd1b5107..202bba6c997c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -9,3 +9,6 @@ config VHOST_NET
9 To compile this driver as a module, choose M here: the module will 9 To compile this driver as a module, choose M here: the module will
10 be called vhost_net. 10 be called vhost_net.
11 11
12if STAGING
13source "drivers/vhost/Kconfig.tcm"
14endif
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
new file mode 100644
index 000000000000..a9c6f76e3208
--- /dev/null
+++ b/drivers/vhost/Kconfig.tcm
@@ -0,0 +1,6 @@
1config TCM_VHOST
2 tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
3 depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
4 default n
5 ---help---
6 Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 72dd02050bb9..a27b053bc9ab 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,2 +1,4 @@
1obj-$(CONFIG_VHOST_NET) += vhost_net.o 1obj-$(CONFIG_VHOST_NET) += vhost_net.o
2vhost_net-y := vhost.o net.o 2vhost_net-y := vhost.o net.o
3
4obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
new file mode 100644
index 000000000000..fb366540ed54
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.c
@@ -0,0 +1,1628 @@
1/*******************************************************************************
2 * Vhost kernel TCM fabric driver for virtio SCSI initiators
3 *
4 * (C) Copyright 2010-2012 RisingTide Systems LLC.
5 * (C) Copyright 2010-2012 IBM Corp.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
8 *
9 * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
10 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 ****************************************************************************/
23
24#include <linux/module.h>
25#include <linux/moduleparam.h>
26#include <generated/utsrelease.h>
27#include <linux/utsname.h>
28#include <linux/init.h>
29#include <linux/slab.h>
30#include <linux/kthread.h>
31#include <linux/types.h>
32#include <linux/string.h>
33#include <linux/configfs.h>
34#include <linux/ctype.h>
35#include <linux/compat.h>
36#include <linux/eventfd.h>
37#include <linux/vhost.h>
38#include <linux/fs.h>
39#include <linux/miscdevice.h>
40#include <asm/unaligned.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_tcq.h>
43#include <target/target_core_base.h>
44#include <target/target_core_fabric.h>
45#include <target/target_core_fabric_configfs.h>
46#include <target/target_core_configfs.h>
47#include <target/configfs_macros.h>
48#include <linux/vhost.h>
49#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
50#include <linux/virtio_scsi.h>
51
52#include "vhost.c"
53#include "vhost.h"
54#include "tcm_vhost.h"
55
56struct vhost_scsi {
57 atomic_t vhost_ref_cnt;
58 struct tcm_vhost_tpg *vs_tpg;
59 struct vhost_dev dev;
60 struct vhost_virtqueue vqs[3];
61
62 struct vhost_work vs_completion_work; /* cmd completion work item */
63 struct list_head vs_completion_list; /* cmd completion queue */
64 spinlock_t vs_completion_lock; /* protects s_completion_list */
65};
66
67/* Local pointer to allocated TCM configfs fabric module */
68static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
69
70static struct workqueue_struct *tcm_vhost_workqueue;
71
72/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
73static DEFINE_MUTEX(tcm_vhost_mutex);
74static LIST_HEAD(tcm_vhost_list);
75
76static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
77{
78 return 1;
79}
80
81static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
82{
83 return 0;
84}
85
86static char *tcm_vhost_get_fabric_name(void)
87{
88 return "vhost";
89}
90
91static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
92{
93 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
94 struct tcm_vhost_tpg, se_tpg);
95 struct tcm_vhost_tport *tport = tpg->tport;
96
97 switch (tport->tport_proto_id) {
98 case SCSI_PROTOCOL_SAS:
99 return sas_get_fabric_proto_ident(se_tpg);
100 case SCSI_PROTOCOL_FCP:
101 return fc_get_fabric_proto_ident(se_tpg);
102 case SCSI_PROTOCOL_ISCSI:
103 return iscsi_get_fabric_proto_ident(se_tpg);
104 default:
105 pr_err("Unknown tport_proto_id: 0x%02x, using"
106 " SAS emulation\n", tport->tport_proto_id);
107 break;
108 }
109
110 return sas_get_fabric_proto_ident(se_tpg);
111}
112
113static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
114{
115 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
116 struct tcm_vhost_tpg, se_tpg);
117 struct tcm_vhost_tport *tport = tpg->tport;
118
119 return &tport->tport_name[0];
120}
121
122static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
123{
124 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
125 struct tcm_vhost_tpg, se_tpg);
126 return tpg->tport_tpgt;
127}
128
129static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
130{
131 return 1;
132}
133
134static u32 tcm_vhost_get_pr_transport_id(
135 struct se_portal_group *se_tpg,
136 struct se_node_acl *se_nacl,
137 struct t10_pr_registration *pr_reg,
138 int *format_code,
139 unsigned char *buf)
140{
141 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
142 struct tcm_vhost_tpg, se_tpg);
143 struct tcm_vhost_tport *tport = tpg->tport;
144
145 switch (tport->tport_proto_id) {
146 case SCSI_PROTOCOL_SAS:
147 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
148 format_code, buf);
149 case SCSI_PROTOCOL_FCP:
150 return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
151 format_code, buf);
152 case SCSI_PROTOCOL_ISCSI:
153 return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
154 format_code, buf);
155 default:
156 pr_err("Unknown tport_proto_id: 0x%02x, using"
157 " SAS emulation\n", tport->tport_proto_id);
158 break;
159 }
160
161 return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
162 format_code, buf);
163}
164
165static u32 tcm_vhost_get_pr_transport_id_len(
166 struct se_portal_group *se_tpg,
167 struct se_node_acl *se_nacl,
168 struct t10_pr_registration *pr_reg,
169 int *format_code)
170{
171 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
172 struct tcm_vhost_tpg, se_tpg);
173 struct tcm_vhost_tport *tport = tpg->tport;
174
175 switch (tport->tport_proto_id) {
176 case SCSI_PROTOCOL_SAS:
177 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
178 format_code);
179 case SCSI_PROTOCOL_FCP:
180 return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
181 format_code);
182 case SCSI_PROTOCOL_ISCSI:
183 return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
184 format_code);
185 default:
186 pr_err("Unknown tport_proto_id: 0x%02x, using"
187 " SAS emulation\n", tport->tport_proto_id);
188 break;
189 }
190
191 return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
192 format_code);
193}
194
195static char *tcm_vhost_parse_pr_out_transport_id(
196 struct se_portal_group *se_tpg,
197 const char *buf,
198 u32 *out_tid_len,
199 char **port_nexus_ptr)
200{
201 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
202 struct tcm_vhost_tpg, se_tpg);
203 struct tcm_vhost_tport *tport = tpg->tport;
204
205 switch (tport->tport_proto_id) {
206 case SCSI_PROTOCOL_SAS:
207 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
208 port_nexus_ptr);
209 case SCSI_PROTOCOL_FCP:
210 return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
211 port_nexus_ptr);
212 case SCSI_PROTOCOL_ISCSI:
213 return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
214 port_nexus_ptr);
215 default:
216 pr_err("Unknown tport_proto_id: 0x%02x, using"
217 " SAS emulation\n", tport->tport_proto_id);
218 break;
219 }
220
221 return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
222 port_nexus_ptr);
223}
224
225static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
226 struct se_portal_group *se_tpg)
227{
228 struct tcm_vhost_nacl *nacl;
229
230 nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
231 if (!nacl) {
232		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
233 return NULL;
234 }
235
236 return &nacl->se_node_acl;
237}
238
239static void tcm_vhost_release_fabric_acl(
240 struct se_portal_group *se_tpg,
241 struct se_node_acl *se_nacl)
242{
243 struct tcm_vhost_nacl *nacl = container_of(se_nacl,
244 struct tcm_vhost_nacl, se_node_acl);
245 kfree(nacl);
246}
247
248static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
249{
250 return 1;
251}
252
253static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
254{
255 return;
256}
257
258static int tcm_vhost_shutdown_session(struct se_session *se_sess)
259{
260 return 0;
261}
262
263static void tcm_vhost_close_session(struct se_session *se_sess)
264{
265 return;
266}
267
268static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
269{
270 return 0;
271}
272
273static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
274{
275 /* Go ahead and process the write immediately */
276 target_execute_cmd(se_cmd);
277 return 0;
278}
279
280static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
281{
282 return 0;
283}
284
285static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
286{
287 return;
288}
289
290static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
291{
292 return 0;
293}
294
295static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
296{
297 return 0;
298}
299
300static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *);
301
302static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
303{
304 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
305 struct tcm_vhost_cmd, tvc_se_cmd);
306 vhost_scsi_complete_cmd(tv_cmd);
307 return 0;
308}
309
310static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
311{
312 struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
313 struct tcm_vhost_cmd, tvc_se_cmd);
314 vhost_scsi_complete_cmd(tv_cmd);
315 return 0;
316}
317
318static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
319{
320 return 0;
321}
322
323static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
324 u32 sense_length)
325{
326 return 0;
327}
328
329static u16 tcm_vhost_get_fabric_sense_len(void)
330{
331 return 0;
332}
333
334static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
335{
336 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
337
338 /* TODO locking against target/backend threads? */
339 transport_generic_free_cmd(se_cmd, 1);
340
341 if (tv_cmd->tvc_sgl_count) {
342 u32 i;
343 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
344 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
345
346 kfree(tv_cmd->tvc_sgl);
347 }
348
349 kfree(tv_cmd);
350}
351
352/* Dequeue a command from the completion list */
353static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
354 struct vhost_scsi *vs)
355{
356 struct tcm_vhost_cmd *tv_cmd = NULL;
357
358 spin_lock_bh(&vs->vs_completion_lock);
359 if (list_empty(&vs->vs_completion_list)) {
360 spin_unlock_bh(&vs->vs_completion_lock);
361 return NULL;
362 }
363
364 list_for_each_entry(tv_cmd, &vs->vs_completion_list,
365 tvc_completion_list) {
366 list_del(&tv_cmd->tvc_completion_list);
367 break;
368 }
369 spin_unlock_bh(&vs->vs_completion_lock);
370 return tv_cmd;
371}
372
373/* Fill in status and signal that we are done processing this command
374 *
375 * This is scheduled in the vhost work queue so we are called with the owner
376 * process mm and can access the vring.
377 */
378static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
379{
380 struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
381 vs_completion_work);
382 struct tcm_vhost_cmd *tv_cmd;
383
384 while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) {
385 struct virtio_scsi_cmd_resp v_rsp;
386 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
387 int ret;
388
389 pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
390 tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
391
392 memset(&v_rsp, 0, sizeof(v_rsp));
393 v_rsp.resid = se_cmd->residual_count;
394 /* TODO is status_qualifier field needed? */
395 v_rsp.status = se_cmd->scsi_status;
396 v_rsp.sense_len = se_cmd->scsi_sense_length;
397 memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
398 v_rsp.sense_len);
399 ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
400 if (likely(ret == 0))
401 vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
402 else
403 pr_err("Faulted on virtio_scsi_cmd_resp\n");
404
405 vhost_scsi_free_cmd(tv_cmd);
406 }
407
408 vhost_signal(&vs->dev, &vs->vqs[2]);
409}
410
411static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
412{
413 struct vhost_scsi *vs = tv_cmd->tvc_vhost;
414
415 pr_debug("%s tv_cmd %p\n", __func__, tv_cmd);
416
417 spin_lock_bh(&vs->vs_completion_lock);
418 list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
419 spin_unlock_bh(&vs->vs_completion_lock);
420
421 vhost_work_queue(&vs->dev, &vs->vs_completion_work);
422}
423
424static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
425 struct tcm_vhost_tpg *tv_tpg,
426 struct virtio_scsi_cmd_req *v_req,
427 u32 exp_data_len,
428 int data_direction)
429{
430 struct tcm_vhost_cmd *tv_cmd;
431 struct tcm_vhost_nexus *tv_nexus;
432 struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
433 struct se_session *se_sess;
434 struct se_cmd *se_cmd;
435 int sam_task_attr;
436
437 tv_nexus = tv_tpg->tpg_nexus;
438 if (!tv_nexus) {
439 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
440 return ERR_PTR(-EIO);
441 }
442 se_sess = tv_nexus->tvn_se_sess;
443
444 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
445 if (!tv_cmd) {
446 pr_err("Unable to allocate struct tcm_vhost_cmd\n");
447 return ERR_PTR(-ENOMEM);
448 }
449 INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
450 tv_cmd->tvc_tag = v_req->tag;
451
452 se_cmd = &tv_cmd->tvc_se_cmd;
453 /*
454 * Locate the SAM Task Attr from virtio_scsi_cmd_req
455 */
456 sam_task_attr = v_req->task_attr;
457 /*
458 * Initialize struct se_cmd descriptor from TCM infrastructure
459 */
460 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
461 data_direction, sam_task_attr,
462 &tv_cmd->tvc_sense_buf[0]);
463
464#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
465 if (bidi)
466 se_cmd->se_cmd_flags |= SCF_BIDI;
467#endif
468 return tv_cmd;
469}
470
471/*
472 * Map a user memory range into a scatterlist
473 *
474 * Returns the number of scatterlist entries used or -errno on error.
475 */
476static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
477 unsigned int sgl_count, void __user *ptr, size_t len, int write)
478{
479 struct scatterlist *sg = sgl;
480 unsigned int npages = 0;
481 int ret;
482
483 while (len > 0) {
484 struct page *page;
485 unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
486 unsigned int nbytes = min_t(unsigned int,
487 PAGE_SIZE - offset, len);
488
489 if (npages == sgl_count) {
490 ret = -ENOBUFS;
491 goto err;
492 }
493
494 ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
495 BUG_ON(ret == 0); /* we should either get our page or fail */
496 if (ret < 0)
497 goto err;
498
499 sg_set_page(sg, page, nbytes, offset);
500 ptr += nbytes;
501 len -= nbytes;
502 sg++;
503 npages++;
504 }
505 return npages;
506
507err:
508 /* Put pages that we hold */
509 for (sg = sgl; sg != &sgl[npages]; sg++)
510 put_page(sg_page(sg));
511 return ret;
512}
513
514static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
515 struct iovec *iov, unsigned int niov, int write)
516{
517 int ret;
518 unsigned int i;
519 u32 sgl_count;
520 struct scatterlist *sg;
521
522 /*
523 * Find out how long sglist needs to be
524 */
525 sgl_count = 0;
526 for (i = 0; i < niov; i++) {
527 sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
528 PAGE_SIZE - 1) >> PAGE_SHIFT) -
529 ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
530 }
531 /* TODO overflow checking */
532
533 sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
534 if (!sg)
535 return -ENOMEM;
536 pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__,
537 sg, sgl_count, IS_ERR(sg));
538 sg_init_table(sg, sgl_count);
539
540 tv_cmd->tvc_sgl = sg;
541 tv_cmd->tvc_sgl_count = sgl_count;
542
543 pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
544 for (i = 0; i < niov; i++) {
545 ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
546 iov[i].iov_len, write);
547 if (ret < 0) {
548 for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
549 put_page(sg_page(&tv_cmd->tvc_sgl[i]));
550 kfree(tv_cmd->tvc_sgl);
551 tv_cmd->tvc_sgl = NULL;
552 tv_cmd->tvc_sgl_count = 0;
553 return ret;
554 }
555
556 sg += ret;
557 sgl_count -= ret;
558 }
559 return 0;
560}
561
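vhost_scsi_map_iov_to_sgl() above sizes the scatterlist by summing, per iovec, the number of pages the buffer touches: the page index just past the end minus the page index of the base. A standalone check of that arithmetic with a hard-coded 4 KiB page size; pages_spanned() is an illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Pages spanned by a user buffer [base, base + len): same formula as the
 * sgl_count loop above, just on plain integers instead of struct iovec. */
static unsigned long pages_spanned(uintptr_t base, size_t len)
{
	return ((base + len + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (base >> PAGE_SHIFT);
}

int main(void)
{
	/* 100 bytes entirely inside one page. */
	printf("%lu\n", pages_spanned(0x1000, 100));	/* 1 */
	/* 100 bytes starting 40 bytes before a page boundary: two pages. */
	printf("%lu\n", pages_spanned(0x1fd8, 100));	/* 2 */
	/* 8 KiB starting mid-page: three pages. */
	printf("%lu\n", pages_spanned(0x1800, 8192));	/* 3 */
	return 0;
}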
562static void tcm_vhost_submission_work(struct work_struct *work)
563{
564 struct tcm_vhost_cmd *tv_cmd =
565 container_of(work, struct tcm_vhost_cmd, work);
566 struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
567 struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
568 int rc, sg_no_bidi = 0;
569 /*
570 * Locate the struct se_lun pointer based on v_req->lun, and
571 * attach it to struct se_cmd
572 */
573 rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
574 if (rc < 0) {
575 pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
576 transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
577 tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
578 transport_generic_free_cmd(se_cmd, 0);
579 return;
580 }
581
582 rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
583 if (rc == -ENOMEM) {
584 transport_send_check_condition_and_sense(se_cmd,
585 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
586 transport_generic_free_cmd(se_cmd, 0);
587 return;
588 } else if (rc < 0) {
589 if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
590 tcm_vhost_queue_status(se_cmd);
591 else
592 transport_send_check_condition_and_sense(se_cmd,
593 se_cmd->scsi_sense_reason, 0);
594 transport_generic_free_cmd(se_cmd, 0);
595 return;
596 }
597
598 if (tv_cmd->tvc_sgl_count) {
599 sg_ptr = tv_cmd->tvc_sgl;
600 /*
601 * For BIDI commands, pass in the extra READ buffer
602 * to transport_generic_map_mem_to_cmd() below..
603 */
604/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
605#if 0
606 if (se_cmd->se_cmd_flags & SCF_BIDI) {
607 sg_bidi_ptr = NULL;
608 sg_no_bidi = 0;
609 }
610#endif
611 } else {
612 sg_ptr = NULL;
613 }
614
615 rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr,
616 tv_cmd->tvc_sgl_count, sg_bidi_ptr,
617 sg_no_bidi);
618 if (rc < 0) {
619 transport_send_check_condition_and_sense(se_cmd,
620 se_cmd->scsi_sense_reason, 0);
621 transport_generic_free_cmd(se_cmd, 0);
622 return;
623 }
624 transport_handle_cdb_direct(se_cmd);
625}
626
627static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
628{
629 struct vhost_virtqueue *vq = &vs->vqs[2];
630 struct virtio_scsi_cmd_req v_req;
631 struct tcm_vhost_tpg *tv_tpg;
632 struct tcm_vhost_cmd *tv_cmd;
633 u32 exp_data_len, data_first, data_num, data_direction;
634 unsigned out, in, i;
635 int head, ret;
636
637 /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
638 tv_tpg = vs->vs_tpg;
639 if (unlikely(!tv_tpg)) {
640 pr_err("%s endpoint not set\n", __func__);
641 return;
642 }
643
644 mutex_lock(&vq->mutex);
645 vhost_disable_notify(&vs->dev, vq);
646
647 for (;;) {
648 head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
649 ARRAY_SIZE(vq->iov), &out, &in,
650 NULL, NULL);
651 pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
652 head, out, in);
653 /* On error, stop handling until the next kick. */
654 if (unlikely(head < 0))
655 break;
656 /* Nothing new? Wait for eventfd to tell us they refilled. */
657 if (head == vq->num) {
658 if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
659 vhost_disable_notify(&vs->dev, vq);
660 continue;
661 }
662 break;
663 }
664
665/* FIXME: BIDI operation */
666 if (out == 1 && in == 1) {
667 data_direction = DMA_NONE;
668 data_first = 0;
669 data_num = 0;
670 } else if (out == 1 && in > 1) {
671 data_direction = DMA_FROM_DEVICE;
672 data_first = out + 1;
673 data_num = in - 1;
674 } else if (out > 1 && in == 1) {
675 data_direction = DMA_TO_DEVICE;
676 data_first = 1;
677 data_num = out - 1;
678 } else {
679 vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
680 out, in);
681 break;
682 }
683
684 /*
685 * Check for a sane resp buffer so we can report errors to
686 * the guest.
687 */
688 if (unlikely(vq->iov[out].iov_len !=
689 sizeof(struct virtio_scsi_cmd_resp))) {
690 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
691 " bytes\n", vq->iov[out].iov_len);
692 break;
693 }
694
695 if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
696 vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
697 " bytes\n", vq->iov[0].iov_len);
698 break;
699 }
700 pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
701 " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
702 ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
703 sizeof(v_req));
704 if (unlikely(ret)) {
705 vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
706 break;
707 }
708
709 exp_data_len = 0;
710 for (i = 0; i < data_num; i++)
711 exp_data_len += vq->iov[data_first + i].iov_len;
712
713 tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
714 exp_data_len, data_direction);
715 if (IS_ERR(tv_cmd)) {
716 vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
717 PTR_ERR(tv_cmd));
718 break;
719 }
720 pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
721 ": %d\n", tv_cmd, exp_data_len, data_direction);
722
723 tv_cmd->tvc_vhost = vs;
724
725 if (unlikely(vq->iov[out].iov_len !=
726 sizeof(struct virtio_scsi_cmd_resp))) {
727 vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
728 " bytes, out: %d, in: %d\n",
729 vq->iov[out].iov_len, out, in);
730 break;
731 }
732
733 tv_cmd->tvc_resp = vq->iov[out].iov_base;
734
735 /*
736		 * Copy in the received CDB descriptor into tv_cmd->tvc_cdb
737 * that will be used by tcm_vhost_new_cmd_map() and down into
738 * target_setup_cmd_from_cdb()
739 */
740 memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
741 /*
742		 * Check that the received CDB size does not exceed our
743 * hardcoded max for tcm_vhost
744 */
745 /* TODO what if cdb was too small for varlen cdb header? */
746 if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
747 TCM_VHOST_MAX_CDB_SIZE)) {
748 vq_err(vq, "Received SCSI CDB with command_size: %d that"
749 " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
750 scsi_command_size(tv_cmd->tvc_cdb),
751 TCM_VHOST_MAX_CDB_SIZE);
752 break; /* TODO */
753 }
754 tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
755
756 pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
757 tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
758
759 if (data_direction != DMA_NONE) {
760 ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
761 &vq->iov[data_first], data_num,
762 data_direction == DMA_TO_DEVICE);
763 if (unlikely(ret)) {
764 vq_err(vq, "Failed to map iov to sgl\n");
765 break; /* TODO */
766 }
767 }
768
769 /*
770 * Save the descriptor from vhost_get_vq_desc() to be used to
771 * complete the virtio-scsi request in TCM callback context via
772 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
773 */
774 tv_cmd->tvc_vq_desc = head;
775 /*
776 * Dispatch tv_cmd descriptor for cmwq execution in process
777 * context provided by tcm_vhost_workqueue. This also ensures
778 * tv_cmd is executed on the same kworker CPU as this vhost
779	 * thread to gain positive L2 cache locality effects.
780 */
781 INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
782 queue_work(tcm_vhost_workqueue, &tv_cmd->work);
783 }
784
785 mutex_unlock(&vq->mutex);
786}
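/*
 * Note (editorial): no response is written back from this path. Submitted
 * commands complete later via tcm_vhost_queue_data_in() and
 * tcm_vhost_queue_status(), which hand the finished tv_cmd to
 * vhost_scsi_complete_cmd_work() (wired up in vhost_scsi_open() below) to
 * copy the virtio_scsi_cmd_resp back to the guest and signal the vq.
 */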
787
788static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
789{
790	pr_err("%s: control virtqueue handling not implemented\n", __func__);
791}
792
793static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
794{
795	pr_err("%s: event virtqueue handling not implemented\n", __func__);
796}
797
798static void vhost_scsi_handle_kick(struct vhost_work *work)
799{
800 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
801 poll.work);
802 struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
803
804 vhost_scsi_handle_vq(vs);
805}
806
807/*
808 * Called from vhost_scsi_ioctl() context to walk the list of available
809 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
810 */
811static int vhost_scsi_set_endpoint(
812 struct vhost_scsi *vs,
813 struct vhost_scsi_target *t)
814{
815 struct tcm_vhost_tport *tv_tport;
816 struct tcm_vhost_tpg *tv_tpg;
817 int index;
818
819 mutex_lock(&vs->dev.mutex);
820 /* Verify that ring has been setup correctly. */
821 for (index = 0; index < vs->dev.nvqs; ++index) {
822 /* Verify that ring has been setup correctly. */
823 if (!vhost_vq_access_ok(&vs->vqs[index])) {
824 mutex_unlock(&vs->dev.mutex);
825 return -EFAULT;
826 }
827 }
828
829 if (vs->vs_tpg) {
830 mutex_unlock(&vs->dev.mutex);
831 return -EEXIST;
832 }
833 mutex_unlock(&vs->dev.mutex);
834
835 mutex_lock(&tcm_vhost_mutex);
836 list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
837 mutex_lock(&tv_tpg->tv_tpg_mutex);
838 if (!tv_tpg->tpg_nexus) {
839 mutex_unlock(&tv_tpg->tv_tpg_mutex);
840 continue;
841 }
842 if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) {
843 mutex_unlock(&tv_tpg->tv_tpg_mutex);
844 continue;
845 }
846 tv_tport = tv_tpg->tport;
847
848 if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
849 (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
850 atomic_inc(&tv_tpg->tv_tpg_vhost_count);
851 smp_mb__after_atomic_inc();
852 mutex_unlock(&tv_tpg->tv_tpg_mutex);
853 mutex_unlock(&tcm_vhost_mutex);
854
855 mutex_lock(&vs->dev.mutex);
856 vs->vs_tpg = tv_tpg;
857 atomic_inc(&vs->vhost_ref_cnt);
858 smp_mb__after_atomic_inc();
859 mutex_unlock(&vs->dev.mutex);
860 return 0;
861 }
862 mutex_unlock(&tv_tpg->tv_tpg_mutex);
863 }
864 mutex_unlock(&tcm_vhost_mutex);
865 return -EINVAL;
866}
867
868static int vhost_scsi_clear_endpoint(
869 struct vhost_scsi *vs,
870 struct vhost_scsi_target *t)
871{
872 struct tcm_vhost_tport *tv_tport;
873 struct tcm_vhost_tpg *tv_tpg;
874 int index;
875
876 mutex_lock(&vs->dev.mutex);
877 /* Verify that ring has been setup correctly. */
878 for (index = 0; index < vs->dev.nvqs; ++index) {
879 if (!vhost_vq_access_ok(&vs->vqs[index])) {
880 mutex_unlock(&vs->dev.mutex);
881 return -EFAULT;
882 }
883 }
884
885 if (!vs->vs_tpg) {
886 mutex_unlock(&vs->dev.mutex);
887 return -ENODEV;
888 }
889 tv_tpg = vs->vs_tpg;
890 tv_tport = tv_tpg->tport;
891
892 if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
893 (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
894 mutex_unlock(&vs->dev.mutex);
895 pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
896 " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
897 tv_tport->tport_name, tv_tpg->tport_tpgt,
898 t->vhost_wwpn, t->vhost_tpgt);
899 return -EINVAL;
900 }
901 atomic_dec(&tv_tpg->tv_tpg_vhost_count);
902 vs->vs_tpg = NULL;
903 mutex_unlock(&vs->dev.mutex);
904
905 return 0;
906}
907
908static int vhost_scsi_open(struct inode *inode, struct file *f)
909{
910 struct vhost_scsi *s;
911 int r;
912
913 s = kzalloc(sizeof(*s), GFP_KERNEL);
914 if (!s)
915 return -ENOMEM;
916
917 vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
918 INIT_LIST_HEAD(&s->vs_completion_list);
919 spin_lock_init(&s->vs_completion_lock);
920
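	/*
	 * Editorial note: the virtqueue indices mirror the virtio-scsi
	 * layout -- vq 0 is the control queue, vq 1 the event queue, and
	 * vq 2 the (single) request queue. Only the request queue is fully
	 * handled above; the other two handlers are stubs for now.
	 */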
921 s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick;
922 s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick;
923 s->vqs[2].handle_kick = vhost_scsi_handle_kick;
924 r = vhost_dev_init(&s->dev, s->vqs, 3);
925 if (r < 0) {
926 kfree(s);
927 return r;
928 }
929
930 f->private_data = s;
931 return 0;
932}
933
934static int vhost_scsi_release(struct inode *inode, struct file *f)
935{
936 struct vhost_scsi *s = f->private_data;
937
938 if (s->vs_tpg && s->vs_tpg->tport) {
939 struct vhost_scsi_target backend;
940
941 memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
942 sizeof(backend.vhost_wwpn));
943 backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
944 vhost_scsi_clear_endpoint(s, &backend);
945 }
946
947 vhost_dev_cleanup(&s->dev, false);
948 kfree(s);
949 return 0;
950}
951
952static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
953{
954 if (features & ~VHOST_FEATURES)
955 return -EOPNOTSUPP;
956
957 mutex_lock(&vs->dev.mutex);
958 if ((features & (1 << VHOST_F_LOG_ALL)) &&
959 !vhost_log_access_ok(&vs->dev)) {
960 mutex_unlock(&vs->dev.mutex);
961 return -EFAULT;
962 }
963 vs->dev.acked_features = features;
964 /* TODO possibly smp_wmb() and flush vqs */
965 mutex_unlock(&vs->dev.mutex);
966 return 0;
967}
968
969static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
970 unsigned long arg)
971{
972 struct vhost_scsi *vs = f->private_data;
973 struct vhost_scsi_target backend;
974 void __user *argp = (void __user *)arg;
975 u64 __user *featurep = argp;
976 u64 features;
977 int r;
978
979 switch (ioctl) {
980 case VHOST_SCSI_SET_ENDPOINT:
981 if (copy_from_user(&backend, argp, sizeof backend))
982 return -EFAULT;
983
984 return vhost_scsi_set_endpoint(vs, &backend);
985 case VHOST_SCSI_CLEAR_ENDPOINT:
986 if (copy_from_user(&backend, argp, sizeof backend))
987 return -EFAULT;
988
989 return vhost_scsi_clear_endpoint(vs, &backend);
990 case VHOST_SCSI_GET_ABI_VERSION:
991 if (copy_from_user(&backend, argp, sizeof backend))
992 return -EFAULT;
993
994 backend.abi_version = VHOST_SCSI_ABI_VERSION;
995
996 if (copy_to_user(argp, &backend, sizeof backend))
997 return -EFAULT;
998 return 0;
999 case VHOST_GET_FEATURES:
1000 features = VHOST_FEATURES;
1001 if (copy_to_user(featurep, &features, sizeof features))
1002 return -EFAULT;
1003 return 0;
1004 case VHOST_SET_FEATURES:
1005 if (copy_from_user(&features, featurep, sizeof features))
1006 return -EFAULT;
1007 return vhost_scsi_set_features(vs, features);
1008 default:
1009 mutex_lock(&vs->dev.mutex);
1010 r = vhost_dev_ioctl(&vs->dev, ioctl, arg);
1011 mutex_unlock(&vs->dev.mutex);
1012 return r;
1013 }
1014}
1015
1016static const struct file_operations vhost_scsi_fops = {
1017 .owner = THIS_MODULE,
1018 .release = vhost_scsi_release,
1019 .unlocked_ioctl = vhost_scsi_ioctl,
1020 /* TODO compat ioctl? */
1021 .open = vhost_scsi_open,
1022 .llseek = noop_llseek,
1023};
1024
1025static struct miscdevice vhost_scsi_misc = {
1026 MISC_DYNAMIC_MINOR,
1027 "vhost-scsi",
1028 &vhost_scsi_fops,
1029};
1030
1031static int __init vhost_scsi_register(void)
1032{
1033 return misc_register(&vhost_scsi_misc);
1034}
1035
1036static int vhost_scsi_deregister(void)
1037{
1038 return misc_deregister(&vhost_scsi_misc);
1039}
1040
1041static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
1042{
1043 switch (tport->tport_proto_id) {
1044 case SCSI_PROTOCOL_SAS:
1045 return "SAS";
1046 case SCSI_PROTOCOL_FCP:
1047 return "FCP";
1048 case SCSI_PROTOCOL_ISCSI:
1049 return "iSCSI";
1050 default:
1051 break;
1052 }
1053
1054 return "Unknown";
1055}
1056
1057static int tcm_vhost_port_link(
1058 struct se_portal_group *se_tpg,
1059 struct se_lun *lun)
1060{
1061 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1062 struct tcm_vhost_tpg, se_tpg);
1063
1064 atomic_inc(&tv_tpg->tv_tpg_port_count);
1065 smp_mb__after_atomic_inc();
1066
1067 return 0;
1068}
1069
1070static void tcm_vhost_port_unlink(
1071 struct se_portal_group *se_tpg,
1072 struct se_lun *se_lun)
1073{
1074 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1075 struct tcm_vhost_tpg, se_tpg);
1076
1077 atomic_dec(&tv_tpg->tv_tpg_port_count);
1078 smp_mb__after_atomic_dec();
1079}
1080
1081static struct se_node_acl *tcm_vhost_make_nodeacl(
1082 struct se_portal_group *se_tpg,
1083 struct config_group *group,
1084 const char *name)
1085{
1086 struct se_node_acl *se_nacl, *se_nacl_new;
1087 struct tcm_vhost_nacl *nacl;
1088 u64 wwpn = 0;
1089 u32 nexus_depth;
1090
1091	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1092 return ERR_PTR(-EINVAL); */
1093 se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
1094 if (!se_nacl_new)
1095 return ERR_PTR(-ENOMEM);
1096
1097 nexus_depth = 1;
1098 /*
1099 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
1100	 * when converting a NodeACL from demo mode -> explicit
1101 */
1102 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
1103 name, nexus_depth);
1104 if (IS_ERR(se_nacl)) {
1105 tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
1106 return se_nacl;
1107 }
1108 /*
1109 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
1110 */
1111 nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
1112 nacl->iport_wwpn = wwpn;
1113
1114 return se_nacl;
1115}
1116
1117static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
1118{
1119 struct tcm_vhost_nacl *nacl = container_of(se_acl,
1120 struct tcm_vhost_nacl, se_node_acl);
1121 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
1122 kfree(nacl);
1123}
1124
1125static int tcm_vhost_make_nexus(
1126 struct tcm_vhost_tpg *tv_tpg,
1127 const char *name)
1128{
1129 struct se_portal_group *se_tpg;
1130 struct tcm_vhost_nexus *tv_nexus;
1131
1132 mutex_lock(&tv_tpg->tv_tpg_mutex);
1133 if (tv_tpg->tpg_nexus) {
1134 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1135 pr_debug("tv_tpg->tpg_nexus already exists\n");
1136 return -EEXIST;
1137 }
1138 se_tpg = &tv_tpg->se_tpg;
1139
1140 tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
1141 if (!tv_nexus) {
1142 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1143 pr_err("Unable to allocate struct tcm_vhost_nexus\n");
1144 return -ENOMEM;
1145 }
1146 /*
1147 * Initialize the struct se_session pointer
1148 */
1149 tv_nexus->tvn_se_sess = transport_init_session();
1150 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1151 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1152 kfree(tv_nexus);
1153 return -ENOMEM;
1154 }
1155 /*
1156	 * Since we are running in 'demo mode' this call will generate a
1157 * struct se_node_acl for the tcm_vhost struct se_portal_group with
1158 * the SCSI Initiator port name of the passed configfs group 'name'.
1159 */
1160 tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
1161 se_tpg, (unsigned char *)name);
1162 if (!tv_nexus->tvn_se_sess->se_node_acl) {
1163 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1164 pr_debug("core_tpg_check_initiator_node_acl() failed"
1165 " for %s\n", name);
1166 transport_free_session(tv_nexus->tvn_se_sess);
1167 kfree(tv_nexus);
1168 return -ENOMEM;
1169 }
1170 /*
1171 * Now register the TCM vHost virtual I_T Nexus as active with the
1172 * call to __transport_register_session()
1173 */
1174 __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
1175 tv_nexus->tvn_se_sess, tv_nexus);
1176 tv_tpg->tpg_nexus = tv_nexus;
1177
1178 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1179 return 0;
1180}
1181
1182static int tcm_vhost_drop_nexus(
1183 struct tcm_vhost_tpg *tpg)
1184{
1185 struct se_session *se_sess;
1186 struct tcm_vhost_nexus *tv_nexus;
1187
1188 mutex_lock(&tpg->tv_tpg_mutex);
1189 tv_nexus = tpg->tpg_nexus;
1190 if (!tv_nexus) {
1191 mutex_unlock(&tpg->tv_tpg_mutex);
1192 return -ENODEV;
1193 }
1194
1195 se_sess = tv_nexus->tvn_se_sess;
1196 if (!se_sess) {
1197 mutex_unlock(&tpg->tv_tpg_mutex);
1198 return -ENODEV;
1199 }
1200
1201 if (atomic_read(&tpg->tv_tpg_port_count)) {
1202 mutex_unlock(&tpg->tv_tpg_mutex);
1203 pr_err("Unable to remove TCM_vHost I_T Nexus with"
1204 " active TPG port count: %d\n",
1205 atomic_read(&tpg->tv_tpg_port_count));
1206 return -EPERM;
1207 }
1208
1209 if (atomic_read(&tpg->tv_tpg_vhost_count)) {
1210 mutex_unlock(&tpg->tv_tpg_mutex);
1211 pr_err("Unable to remove TCM_vHost I_T Nexus with"
1212 " active TPG vhost count: %d\n",
1213 atomic_read(&tpg->tv_tpg_vhost_count));
1214 return -EPERM;
1215 }
1216
1217 pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated"
1218 " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
1219 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1220 /*
1221 * Release the SCSI I_T Nexus to the emulated vHost Target Port
1222 */
1223 transport_deregister_session(tv_nexus->tvn_se_sess);
1224 tpg->tpg_nexus = NULL;
1225 mutex_unlock(&tpg->tv_tpg_mutex);
1226
1227 kfree(tv_nexus);
1228 return 0;
1229}
1230
1231static ssize_t tcm_vhost_tpg_show_nexus(
1232 struct se_portal_group *se_tpg,
1233 char *page)
1234{
1235 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1236 struct tcm_vhost_tpg, se_tpg);
1237 struct tcm_vhost_nexus *tv_nexus;
1238 ssize_t ret;
1239
1240 mutex_lock(&tv_tpg->tv_tpg_mutex);
1241 tv_nexus = tv_tpg->tpg_nexus;
1242 if (!tv_nexus) {
1243 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1244 return -ENODEV;
1245 }
1246 ret = snprintf(page, PAGE_SIZE, "%s\n",
1247 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1248 mutex_unlock(&tv_tpg->tv_tpg_mutex);
1249
1250 return ret;
1251}
1252
1253static ssize_t tcm_vhost_tpg_store_nexus(
1254 struct se_portal_group *se_tpg,
1255 const char *page,
1256 size_t count)
1257{
1258 struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
1259 struct tcm_vhost_tpg, se_tpg);
1260 struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
1261 unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
1262 int ret;
1263 /*
1264	 * Shut down the active I_T nexus if 'NULL' is passed.
1265 */
1266 if (!strncmp(page, "NULL", 4)) {
1267 ret = tcm_vhost_drop_nexus(tv_tpg);
1268 return (!ret) ? count : ret;
1269 }
1270 /*
1271 * Otherwise make sure the passed virtual Initiator port WWN matches
1272 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
1273 * tcm_vhost_make_nexus().
1274 */
1275 if (strlen(page) >= TCM_VHOST_NAMELEN) {
1276		pr_err("Emulated NAA SAS Address: %s, exceeds"
1277 " max: %d\n", page, TCM_VHOST_NAMELEN);
1278 return -EINVAL;
1279 }
1280 snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
1281
1282 ptr = strstr(i_port, "naa.");
1283 if (ptr) {
1284 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
1285 pr_err("Passed SAS Initiator Port %s does not"
1286 " match target port protoid: %s\n", i_port,
1287 tcm_vhost_dump_proto_id(tport_wwn));
1288 return -EINVAL;
1289 }
1290 port_ptr = &i_port[0];
1291 goto check_newline;
1292 }
1293 ptr = strstr(i_port, "fc.");
1294 if (ptr) {
1295 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
1296 pr_err("Passed FCP Initiator Port %s does not"
1297 " match target port protoid: %s\n", i_port,
1298 tcm_vhost_dump_proto_id(tport_wwn));
1299 return -EINVAL;
1300 }
1301 port_ptr = &i_port[3]; /* Skip over "fc." */
1302 goto check_newline;
1303 }
1304 ptr = strstr(i_port, "iqn.");
1305 if (ptr) {
1306 if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
1307 pr_err("Passed iSCSI Initiator Port %s does not"
1308 " match target port protoid: %s\n", i_port,
1309 tcm_vhost_dump_proto_id(tport_wwn));
1310 return -EINVAL;
1311 }
1312 port_ptr = &i_port[0];
1313 goto check_newline;
1314 }
1315 pr_err("Unable to locate prefix for emulated Initiator Port:"
1316 " %s\n", i_port);
1317 return -EINVAL;
1318 /*
1319 * Clear any trailing newline for the NAA WWN
1320 */
1321check_newline:
1322 if (i_port[strlen(i_port)-1] == '\n')
1323 i_port[strlen(i_port)-1] = '\0';
1324
1325 ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
1326 if (ret < 0)
1327 return ret;
1328
1329 return count;
1330}
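/*
 * Illustrative (assumed) configfs usage for the nexus attribute above,
 * with made-up WWPNs; the exact configfs mount point may differ:
 *
 *   mkdir -p /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *   echo -n naa.60014053226f0388 > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *   echo NULL > .../tpgt_1/nexus    (drops the active I_T nexus again)
 */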
1331
1332TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
1333
1334static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
1335 &tcm_vhost_tpg_nexus.attr,
1336 NULL,
1337};
1338
1339static struct se_portal_group *tcm_vhost_make_tpg(
1340 struct se_wwn *wwn,
1341 struct config_group *group,
1342 const char *name)
1343{
1344 struct tcm_vhost_tport *tport = container_of(wwn,
1345 struct tcm_vhost_tport, tport_wwn);
1346
1347 struct tcm_vhost_tpg *tpg;
1348 unsigned long tpgt;
1349 int ret;
1350
1351 if (strstr(name, "tpgt_") != name)
1352 return ERR_PTR(-EINVAL);
1353 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
1354 return ERR_PTR(-EINVAL);
1355
1356 tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
1357 if (!tpg) {
1358 pr_err("Unable to allocate struct tcm_vhost_tpg");
1359 return ERR_PTR(-ENOMEM);
1360 }
1361 mutex_init(&tpg->tv_tpg_mutex);
1362 INIT_LIST_HEAD(&tpg->tv_tpg_list);
1363 tpg->tport = tport;
1364 tpg->tport_tpgt = tpgt;
1365
1366 ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
1367 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1368 if (ret < 0) {
1369 kfree(tpg);
1370 return NULL;
1371 }
1372 mutex_lock(&tcm_vhost_mutex);
1373 list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
1374 mutex_unlock(&tcm_vhost_mutex);
1375
1376 return &tpg->se_tpg;
1377}
1378
1379static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
1380{
1381 struct tcm_vhost_tpg *tpg = container_of(se_tpg,
1382 struct tcm_vhost_tpg, se_tpg);
1383
1384 mutex_lock(&tcm_vhost_mutex);
1385 list_del(&tpg->tv_tpg_list);
1386 mutex_unlock(&tcm_vhost_mutex);
1387 /*
1388 * Release the virtual I_T Nexus for this vHost TPG
1389 */
1390 tcm_vhost_drop_nexus(tpg);
1391 /*
1392 * Deregister the se_tpg from TCM..
1393 */
1394 core_tpg_deregister(se_tpg);
1395 kfree(tpg);
1396}
1397
1398static struct se_wwn *tcm_vhost_make_tport(
1399 struct target_fabric_configfs *tf,
1400 struct config_group *group,
1401 const char *name)
1402{
1403 struct tcm_vhost_tport *tport;
1404 char *ptr;
1405 u64 wwpn = 0;
1406 int off = 0;
1407
1408 /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
1409 return ERR_PTR(-EINVAL); */
1410
1411 tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
1412 if (!tport) {
1413 pr_err("Unable to allocate struct tcm_vhost_tport");
1414 return ERR_PTR(-ENOMEM);
1415 }
1416 tport->tport_wwpn = wwpn;
1417 /*
1418 * Determine the emulated Protocol Identifier and Target Port Name
1419 * based on the incoming configfs directory name.
1420 */
1421 ptr = strstr(name, "naa.");
1422 if (ptr) {
1423 tport->tport_proto_id = SCSI_PROTOCOL_SAS;
1424 goto check_len;
1425 }
1426 ptr = strstr(name, "fc.");
1427 if (ptr) {
1428 tport->tport_proto_id = SCSI_PROTOCOL_FCP;
1429 off = 3; /* Skip over "fc." */
1430 goto check_len;
1431 }
1432 ptr = strstr(name, "iqn.");
1433 if (ptr) {
1434 tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
1435 goto check_len;
1436 }
1437
1438 pr_err("Unable to locate prefix for emulated Target Port:"
1439 " %s\n", name);
1440 kfree(tport);
1441 return ERR_PTR(-EINVAL);
1442
1443check_len:
1444 if (strlen(name) >= TCM_VHOST_NAMELEN) {
1445 pr_err("Emulated %s Address: %s, exceeds"
1446 " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
1447 TCM_VHOST_NAMELEN);
1448 kfree(tport);
1449 return ERR_PTR(-EINVAL);
1450 }
1451 snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
1452
1453 pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
1454 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
1455
1456 return &tport->tport_wwn;
1457}
1458
1459static void tcm_vhost_drop_tport(struct se_wwn *wwn)
1460{
1461 struct tcm_vhost_tport *tport = container_of(wwn,
1462 struct tcm_vhost_tport, tport_wwn);
1463
1464 pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
1465 " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
1466 tport->tport_name);
1467
1468 kfree(tport);
1469}
1470
1471static ssize_t tcm_vhost_wwn_show_attr_version(
1472 struct target_fabric_configfs *tf,
1473 char *page)
1474{
1475 return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
1476		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1477 utsname()->machine);
1478}
1479
1480TF_WWN_ATTR_RO(tcm_vhost, version);
1481
1482static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
1483 &tcm_vhost_wwn_version.attr,
1484 NULL,
1485};
1486
1487static struct target_core_fabric_ops tcm_vhost_ops = {
1488 .get_fabric_name = tcm_vhost_get_fabric_name,
1489 .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
1490 .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
1491 .tpg_get_tag = tcm_vhost_get_tag,
1492 .tpg_get_default_depth = tcm_vhost_get_default_depth,
1493 .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
1494 .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
1495 .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
1496 .tpg_check_demo_mode = tcm_vhost_check_true,
1497 .tpg_check_demo_mode_cache = tcm_vhost_check_true,
1498 .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
1499 .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
1500 .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
1501 .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
1502 .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
1503 .release_cmd = tcm_vhost_release_cmd,
1504 .shutdown_session = tcm_vhost_shutdown_session,
1505 .close_session = tcm_vhost_close_session,
1506 .sess_get_index = tcm_vhost_sess_get_index,
1507 .sess_get_initiator_sid = NULL,
1508 .write_pending = tcm_vhost_write_pending,
1509 .write_pending_status = tcm_vhost_write_pending_status,
1510 .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
1511 .get_task_tag = tcm_vhost_get_task_tag,
1512 .get_cmd_state = tcm_vhost_get_cmd_state,
1513 .queue_data_in = tcm_vhost_queue_data_in,
1514 .queue_status = tcm_vhost_queue_status,
1515 .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
1516 .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len,
1517 .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len,
1518 /*
1519 * Setup callers for generic logic in target_core_fabric_configfs.c
1520 */
1521 .fabric_make_wwn = tcm_vhost_make_tport,
1522 .fabric_drop_wwn = tcm_vhost_drop_tport,
1523 .fabric_make_tpg = tcm_vhost_make_tpg,
1524 .fabric_drop_tpg = tcm_vhost_drop_tpg,
1525 .fabric_post_link = tcm_vhost_port_link,
1526 .fabric_pre_unlink = tcm_vhost_port_unlink,
1527 .fabric_make_np = NULL,
1528 .fabric_drop_np = NULL,
1529 .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
1530 .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
1531};
1532
1533static int tcm_vhost_register_configfs(void)
1534{
1535 struct target_fabric_configfs *fabric;
1536 int ret;
1537
1538 pr_debug("TCM_VHOST fabric module %s on %s/%s"
1539 " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
1540 utsname()->machine);
1541 /*
1542 * Register the top level struct config_item_type with TCM core
1543 */
1544 fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
1545 if (IS_ERR(fabric)) {
1546 pr_err("target_fabric_configfs_init() failed\n");
1547 return PTR_ERR(fabric);
1548 }
1549 /*
1550 * Setup fabric->tf_ops from our local tcm_vhost_ops
1551 */
1552 fabric->tf_ops = tcm_vhost_ops;
1553 /*
1554 * Setup default attribute lists for various fabric->tf_cit_tmpl
1555 */
1556 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
1557 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
1558 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1559 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1560 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1561 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1562 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1563 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1564 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1565 /*
1566 * Register the fabric for use within TCM
1567 */
1568 ret = target_fabric_configfs_register(fabric);
1569 if (ret < 0) {
1570 pr_err("target_fabric_configfs_register() failed"
1571 " for TCM_VHOST\n");
1572 return ret;
1573 }
1574 /*
1575 * Setup our local pointer to *fabric
1576 */
1577 tcm_vhost_fabric_configfs = fabric;
1578 pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
1579 return 0;
1580};
1581
1582static void tcm_vhost_deregister_configfs(void)
1583{
1584 if (!tcm_vhost_fabric_configfs)
1585 return;
1586
1587 target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
1588 tcm_vhost_fabric_configfs = NULL;
1589 pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
1590};
1591
1592static int __init tcm_vhost_init(void)
1593{
1594 int ret = -ENOMEM;
1595
1596 tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
1597 if (!tcm_vhost_workqueue)
1598 goto out;
1599
1600 ret = vhost_scsi_register();
1601 if (ret < 0)
1602 goto out_destroy_workqueue;
1603
1604 ret = tcm_vhost_register_configfs();
1605 if (ret < 0)
1606 goto out_vhost_scsi_deregister;
1607
1608 return 0;
1609
1610out_vhost_scsi_deregister:
1611 vhost_scsi_deregister();
1612out_destroy_workqueue:
1613 destroy_workqueue(tcm_vhost_workqueue);
1614out:
1615 return ret;
1616};
1617
1618static void tcm_vhost_exit(void)
1619{
1620 tcm_vhost_deregister_configfs();
1621 vhost_scsi_deregister();
1622 destroy_workqueue(tcm_vhost_workqueue);
1623};
1624
1625MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
1626MODULE_LICENSE("GPL");
1627module_init(tcm_vhost_init);
1628module_exit(tcm_vhost_exit);
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
new file mode 100644
index 000000000000..c983ed21e413
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.h
@@ -0,0 +1,101 @@
1#define TCM_VHOST_VERSION "v0.1"
2#define TCM_VHOST_NAMELEN 256
3#define TCM_VHOST_MAX_CDB_SIZE 32
4
5struct tcm_vhost_cmd {
6 /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
7 int tvc_vq_desc;
8 /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
9 u64 tvc_tag;
10 /* The number of scatterlists associated with this cmd */
11 u32 tvc_sgl_count;
12 /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
13 u32 tvc_lun;
14 /* Pointer to the SGL formatted memory from virtio-scsi */
15 struct scatterlist *tvc_sgl;
16 /* Pointer to response */
17 struct virtio_scsi_cmd_resp __user *tvc_resp;
18 /* Pointer to vhost_scsi for our device */
19 struct vhost_scsi *tvc_vhost;
20 /* The TCM I/O descriptor that is accessed via container_of() */
21 struct se_cmd tvc_se_cmd;
22 /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
23 struct work_struct work;
24 /* Copy of the incoming SCSI command descriptor block (CDB) */
25 unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
26 /* Sense buffer that will be mapped into outgoing status */
27 unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
28 /* Completed commands list, serviced from vhost worker thread */
29 struct list_head tvc_completion_list;
30};
31
32struct tcm_vhost_nexus {
33 /* Pointer to TCM session for I_T Nexus */
34 struct se_session *tvn_se_sess;
35};
36
37struct tcm_vhost_nacl {
38 /* Binary World Wide unique Port Name for Vhost Initiator port */
39 u64 iport_wwpn;
40	/* ASCII formatted WWPN for SAS Initiator port */
41 char iport_name[TCM_VHOST_NAMELEN];
42 /* Returned by tcm_vhost_make_nodeacl() */
43 struct se_node_acl se_node_acl;
44};
45
46struct tcm_vhost_tpg {
47 /* Vhost port target portal group tag for TCM */
48 u16 tport_tpgt;
49	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
50 atomic_t tv_tpg_port_count;
51 /* Used for vhost_scsi device reference to tpg_nexus */
52 atomic_t tv_tpg_vhost_count;
53 /* list for tcm_vhost_list */
54 struct list_head tv_tpg_list;
55 /* Used to protect access for tpg_nexus */
56 struct mutex tv_tpg_mutex;
57 /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
58 struct tcm_vhost_nexus *tpg_nexus;
59 /* Pointer back to tcm_vhost_tport */
60 struct tcm_vhost_tport *tport;
61 /* Returned by tcm_vhost_make_tpg() */
62 struct se_portal_group se_tpg;
63};
64
65struct tcm_vhost_tport {
66 /* SCSI protocol the tport is providing */
67 u8 tport_proto_id;
68 /* Binary World Wide unique Port Name for Vhost Target port */
69 u64 tport_wwpn;
70 /* ASCII formatted WWPN for Vhost Target port */
71 char tport_name[TCM_VHOST_NAMELEN];
72 /* Returned by tcm_vhost_make_tport() */
73 struct se_wwn tport_wwn;
74};
75
76/*
77 * As per request from MST, keep TCM_VHOST related ioctl defines out of
78 * linux/vhost.h (user-space) for now.
79 */
80
81#include <linux/vhost.h>
82
83/*
84 * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
85 *
86 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
87 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
88 */
89
90#define VHOST_SCSI_ABI_VERSION 0
91
92struct vhost_scsi_target {
93 int abi_version;
94 unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
95 unsigned short vhost_tpgt;
96};
97
98/* VHOST_SCSI specific defines */
99#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
100#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
101#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
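For illustration only, not part of the patch: a minimal userspace sketch of the ioctl ABI defined above. It assumes struct vhost_scsi_target and the VHOST_SCSI_* defines are available to userspace (e.g. via a copied header), omits the generic vhost setup (VHOST_SET_OWNER, memory table, vring configuration) that a real user such as QEMU performs, and uses placeholder WWPN/TPGT values.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "tcm_vhost.h"	/* assumed userspace copy of the definitions above */

int main(void)
{
	struct vhost_scsi_target t;
	int fd = open("/dev/vhost-scsi", O_RDWR);

	if (fd < 0)
		return 1;

	/* Query the ABI revision; the handler fills in t.abi_version. */
	memset(&t, 0, sizeof(t));
	if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &t) < 0)
		return 1;
	if (t.abi_version > VHOST_SCSI_ABI_VERSION) {
		fprintf(stderr, "unsupported vhost-scsi ABI: %d\n", t.abi_version);
		return 1;
	}

	/* Placeholder WWPN/TPGT; must match a configured vhost TPG. */
	snprintf((char *)t.vhost_wwpn, sizeof(t.vhost_wwpn),
		 "naa.600140554cf3a18e");
	t.vhost_tpgt = 1;
	if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t) < 0)
		return 1;

	return 0;
}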
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa8158a8b..858c9714b2f3 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
  */
 
 struct zorro_bus {
-	struct list_head devices;	/* list of devices on this bus */
 	struct device dev;
 };
 
@@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
 	if (!bus)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&bus->devices);
 	bus->dev.parent = &pdev->dev;
 	dev_set_name(&bus->dev, "zorro");
 	error = device_register(&bus->dev);