Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 12
-rw-r--r--  drivers/acpi/processor_core.c | 6
-rw-r--r--  drivers/acpi/processor_idle.c | 6
-rw-r--r--  drivers/acpi/processor_thermal.c | 6
-rw-r--r--  drivers/acpi/processor_throttling.c | 30
-rw-r--r--  drivers/acpi/video.c | 7
-rw-r--r--  drivers/ata/ata_piix.c | 14
-rw-r--r--  drivers/char/n_tty.c | 3
-rw-r--r--  drivers/char/pty.c | 10
-rw-r--r--  drivers/char/tty_ldisc.c | 10
-rw-r--r--  drivers/clocksource/sh_cmt.c | 28
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 40
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 72
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 51
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 96
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 82
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 65
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 14
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 157
-rw-r--r--  drivers/input/joydev.c | 68
-rw-r--r--  drivers/input/joystick/iforce/iforce-main.c | 1
-rw-r--r--  drivers/input/joystick/iforce/iforce-usb.c | 1
-rw-r--r--  drivers/input/keyboard/atkbd.c | 35
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 8
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 43
-rw-r--r--  drivers/input/touchscreen/ucb1400_ts.c | 17
-rw-r--r--  drivers/leds/ledtrig-gpio.c | 24
-rw-r--r--  drivers/macintosh/via-maciisi.c | 2
-rw-r--r--  drivers/md/dm-exception-store.c | 13
-rw-r--r--  drivers/md/dm-exception-store.h | 4
-rw-r--r--  drivers/md/dm-log-userspace-base.c | 39
-rw-r--r--  drivers/md/dm-log-userspace-transfer.c | 6
-rw-r--r--  drivers/md/dm-log-userspace-transfer.h | 2
-rw-r--r--  drivers/md/dm-raid1.c | 8
-rw-r--r--  drivers/md/dm-snap-persistent.c | 88
-rw-r--r--  drivers/md/dm-snap.c | 23
-rw-r--r--  drivers/md/dm-stripe.c | 13
-rw-r--r--  drivers/md/dm-table.c | 51
-rw-r--r--  drivers/md/dm.c | 15
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/media/dvb/siano/Kconfig | 40
-rw-r--r--  drivers/media/dvb/siano/Makefile | 9
-rw-r--r--  drivers/media/dvb/siano/smsdvb.c | 44
-rw-r--r--  drivers/media/dvb/siano/smssdio.c | 54
-rw-r--r--  drivers/media/video/em28xx/em28xx-cards.c | 44
-rw-r--r--  drivers/media/video/em28xx/em28xx.h | 1
-rw-r--r--  drivers/media/video/gspca/Kconfig | 2
-rw-r--r--  drivers/media/video/zr364xx.c | 2
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/net/3c59x.c | 4
-rw-r--r--  drivers/net/8139cp.c | 5
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/arm/w90p910_ether.c | 4
-rw-r--r--  drivers/net/atl1c/atl1c_ethtool.c | 8
-rw-r--r--  drivers/net/atlx/atl1.c | 8
-rw-r--r--  drivers/net/b44.c | 5
-rw-r--r--  drivers/net/bnx2.c | 17
-rw-r--r--  drivers/net/bnx2.h | 1
-rw-r--r--  drivers/net/can/dev.c | 7
-rw-r--r--  drivers/net/cnic.c | 143
-rw-r--r--  drivers/net/cnic.h | 1
-rw-r--r--  drivers/net/cnic_if.h | 1
-rw-r--r--  drivers/net/e100.c | 2
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 94
-rw-r--r--  drivers/net/e1000e/netdev.c | 22
-rw-r--r--  drivers/net/fec.c | 9
-rw-r--r--  drivers/net/fec_mpc52xx.c | 5
-rw-r--r--  drivers/net/gianfar.c | 12
-rw-r--r--  drivers/net/ibm_newemac/core.c | 2
-rw-r--r--  drivers/net/irda/au1k_ir.c | 4
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 4
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 4
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 27
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 90
-rw-r--r--  drivers/net/ixp2000/ixpdev.c | 5
-rw-r--r--  drivers/net/macb.c | 7
-rw-r--r--  drivers/net/mlx4/en_rx.c | 5
-rw-r--r--  drivers/net/mlx4/en_tx.c | 5
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 7
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 105
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/smc91x.c | 40
-rw-r--r--  drivers/net/tulip/tulip_core.c | 5
-rw-r--r--  drivers/net/tun.c | 50
-rw-r--r--  drivers/net/ucc_geth.c | 5
-rw-r--r--  drivers/net/usb/pegasus.h | 2
-rw-r--r--  drivers/net/via-rhine.c | 5
-rw-r--r--  drivers/net/via-velocity.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 61
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ar9170/usb.c | 6
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 2
-rw-r--r--  drivers/net/wireless/libertas/assoc.c | 18
-rw-r--r--  drivers/net/wireless/libertas/hostcmd.h | 4
-rw-r--r--  drivers/net/wireless/mwl8k.c | 31
-rw-r--r--  drivers/net/wireless/orinoco/hw.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 6
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 14
-rw-r--r--  drivers/net/yellowfin.c | 28
-rw-r--r--  drivers/net/zorro8390.c | 3
-rw-r--r--  drivers/pci/iov.c | 23
-rw-r--r--  drivers/pci/pci-driver.c | 2
-rw-r--r--  drivers/pci/pci.c | 2
-rw-r--r--  drivers/pci/pci.h | 13
-rw-r--r--  drivers/pci/setup-bus.c | 4
-rw-r--r--  drivers/pci/setup-res.c | 8
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 1
-rw-r--r--  drivers/platform/x86/wmi.c | 8
-rw-r--r--  drivers/pps/pps.c | 2
-rw-r--r--  drivers/s390/block/dasd.c | 2
-rw-r--r--  drivers/s390/cio/device.c | 4
-rw-r--r--  drivers/sbus/char/bbc_envctrl.c | 11
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 98
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 6
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_config.c | 91
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 124
-rw-r--r--  drivers/serial/Kconfig | 2
-rw-r--r--  drivers/spi/spi_s3c24xx.c | 23
-rw-r--r--  drivers/thermal/thermal_sys.c | 9
-rw-r--r--  drivers/video/sh_mobile_lcdcfb.c | 5
-rw-r--r--  drivers/video/xen-fbfront.c | 8
-rw-r--r--  drivers/watchdog/ar7_wdt.c | 2
158 files changed, 2227 insertions, 1221 deletions
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 67340cc70142..257706e7734f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -70,6 +70,12 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
 
 	ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc);
 
+	/* If Source and Target are the same, just return */
+
+	if (source_desc == target_desc) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* We know that source_desc is a buffer by now */
 
 	buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer);
@@ -161,6 +167,12 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
 
 	ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc);
 
+	/* If Source and Target are the same, just return */
+
+	if (source_desc == target_desc) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* We know that source_desc is a string by now */
 
 	buffer = ACPI_CAST_PTR(u8, source_desc->string.pointer);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 84e0f3c07442..2cc4b3033872 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -1151,6 +1151,9 @@ static int __init acpi_processor_init(void)
 {
 	int result = 0;
 
+	if (acpi_disabled)
+		return 0;
+
 	memset(&errata, 0, sizeof(errata));
 
 #ifdef CONFIG_SMP
@@ -1197,6 +1200,9 @@ out_proc:
 
 static void __exit acpi_processor_exit(void)
 {
+	if (acpi_disabled)
+		return;
+
 	acpi_processor_ppc_exit();
 
 	acpi_thermal_cpufreq_exit();
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 0efa59e7e3af..66393d5c4c7c 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -162,8 +162,9 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
 	pr->power.timer_broadcast_on_state = state;
 }
 
-static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
+static void lapic_timer_propagate_broadcast(void *arg)
 {
+	struct acpi_processor *pr = (struct acpi_processor *) arg;
 	unsigned long reason;
 
 	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
@@ -635,7 +636,8 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 			working++;
 	}
 
-	lapic_timer_propagate_broadcast(pr);
+	smp_call_function_single(pr->id, lapic_timer_propagate_broadcast,
+				 pr, 1);
 
 	return (working);
 }
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 39838c666032..31adda1099e0 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -66,7 +66,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
 		if (pr->limit.thermal.tx > tx)
 			tx = pr->limit.thermal.tx;
 
-		result = acpi_processor_set_throttling(pr, tx);
+		result = acpi_processor_set_throttling(pr, tx, false);
 		if (result)
 			goto end;
 	}
@@ -421,12 +421,12 @@ processor_set_cur_state(struct thermal_cooling_device *cdev,
 
 	if (state <= max_pstate) {
 		if (pr->flags.throttling && pr->throttling.state)
-			result = acpi_processor_set_throttling(pr, 0);
+			result = acpi_processor_set_throttling(pr, 0, false);
 		cpufreq_set_cur_state(pr->id, state);
 	} else {
 		cpufreq_set_cur_state(pr->id, max_pstate);
 		result = acpi_processor_set_throttling(pr,
-				state - max_pstate);
+				state - max_pstate, false);
 	}
 	return result;
 }
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 227543789ba9..ae39797aab55 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -62,7 +62,8 @@ struct throttling_tstate {
 #define THROTTLING_POSTCHANGE (2)
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr);
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+				  int state, bool force);
 
 static int acpi_processor_update_tsd_coord(void)
 {
@@ -361,7 +362,7 @@ int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
 		 */
 		target_state = throttling_limit;
 	}
-	return acpi_processor_set_throttling(pr, target_state);
+	return acpi_processor_set_throttling(pr, target_state, false);
 }
 
 /*
@@ -839,10 +840,10 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		if (state == -1) {
-			ACPI_WARNING((AE_INFO,
-				"Invalid throttling state, reset"));
+			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				"Invalid throttling state, reset\n"));
 			state = 0;
-			ret = acpi_processor_set_throttling(pr, state);
+			ret = acpi_processor_set_throttling(pr, state, true);
 			if (ret)
 				return ret;
 		}
@@ -915,7 +916,7 @@ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
 }
 
 static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
-					      int state)
+					      int state, bool force)
 {
 	u32 value = 0;
 	u32 duty_mask = 0;
@@ -930,7 +931,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -988,7 +989,7 @@ static int acpi_processor_set_throttling_fadt(struct acpi_processor *pr,
 }
 
 static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
-					     int state)
+					     int state, bool force)
 {
 	int ret;
 	acpi_integer value;
@@ -1002,7 +1003,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	if (!pr->flags.throttling)
 		return -ENODEV;
 
-	if (state == pr->throttling.state)
+	if (!force && (state == pr->throttling.state))
 		return 0;
 
 	if (state < pr->throttling_platform_limit)
@@ -1018,7 +1019,8 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	return 0;
 }
 
-int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
+int acpi_processor_set_throttling(struct acpi_processor *pr,
+				  int state, bool force)
 {
 	cpumask_var_t saved_mask;
 	int ret = 0;
@@ -1070,7 +1072,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 		/* FIXME: use work_on_cpu() */
 		set_cpus_allowed_ptr(current, cpumask_of(pr->id));
 		ret = p_throttling->acpi_processor_set_throttling(pr,
-				t_state.target_state);
+				t_state.target_state, force);
 	} else {
 		/*
 		 * When the T-state coordination is SW_ALL or HW_ALL,
@@ -1103,7 +1105,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 			set_cpus_allowed_ptr(current, cpumask_of(i));
 			ret = match_pr->throttling.
 				acpi_processor_set_throttling(
-					match_pr, t_state.target_state);
+					match_pr, t_state.target_state, force);
 		}
 	}
 	/*
@@ -1201,7 +1203,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Disabling throttling (was T%d)\n",
 			pr->throttling.state));
-		result = acpi_processor_set_throttling(pr, 0);
+		result = acpi_processor_set_throttling(pr, 0, false);
 		if (result)
 			goto end;
 	}
@@ -1307,7 +1309,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 	if (strcmp(tmpbuf, charp) != 0)
 		return -EINVAL;
 
-	result = acpi_processor_set_throttling(pr, state_val);
+	result = acpi_processor_set_throttling(pr, state_val, false);
 	if (result)
 		return result;
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8851315ce858..60ea984c84a0 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -2004,8 +2004,11 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
 	status = acpi_remove_notify_handler(device->dev->handle,
 					    ACPI_DEVICE_NOTIFY,
 					    acpi_video_device_notify);
-	sysfs_remove_link(&device->backlight->dev.kobj, "device");
-	backlight_device_unregister(device->backlight);
+	if (device->backlight) {
+		sysfs_remove_link(&device->backlight->dev.kobj, "device");
+		backlight_device_unregister(device->backlight);
+		device->backlight = NULL;
+	}
 	if (device->cdev) {
 		sysfs_remove_link(&device->dev->dev.kobj,
 				  "thermal_cooling");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 56b8a3ff1286..9ac4e378992e 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -664,6 +664,8 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 	return ata_sff_prereset(link, deadline);
 }
 
+static DEFINE_SPINLOCK(piix_lock);
+
 /**
  * piix_set_piomode - Initialize host controller PATA PIO timings
  * @ap: Port whose timings we are configuring
@@ -677,8 +679,9 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
 
 static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 {
-	unsigned int pio = adev->pio_mode - XFER_PIO_0;
 	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned long flags;
+	unsigned int pio = adev->pio_mode - XFER_PIO_0;
 	unsigned int is_slave = (adev->devno != 0);
 	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
 	unsigned int slave_port = 0x44;
@@ -708,6 +711,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 	if (adev->class == ATA_DEV_ATA)
 		control |= 4;	/* PPE enable */
 
+	spin_lock_irqsave(&piix_lock, flags);
+
 	/* PIO configuration clears DTE unconditionally.  It will be
 	 * programmed in set_dmamode which is guaranteed to be called
 	 * after set_piomode if any DMA mode is available.
@@ -747,6 +752,8 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 		udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
 		pci_write_config_byte(dev, 0x48, udma_enable);
 	}
+
+	spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
@@ -764,6 +771,7 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
 static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich)
 {
 	struct pci_dev *dev = to_pci_dev(ap->host->dev);
+	unsigned long flags;
 	u8 master_port = ap->port_no ? 0x42 : 0x40;
 	u16 master_data;
 	u8 speed = adev->dma_mode;
@@ -777,6 +785,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
 			    { 2, 1 },
 			    { 2, 3 }, };
 
+	spin_lock_irqsave(&piix_lock, flags);
+
 	pci_read_config_word(dev, master_port, &master_data);
 	if (ap->udma_mask)
 		pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -867,6 +877,8 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
 	/* Don't scribble on 0x48 if the controller does not support UDMA */
 	if (ap->udma_mask)
 		pci_write_config_byte(dev, 0x48, udma_enable);
+
+	spin_unlock_irqrestore(&piix_lock, flags);
 }
 
 /**
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 973be2f44195..4e28b35024ec 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
 			if (space < 2)
 				return -1;
 			tty->canon_column = tty->column = 0;
-			tty_put_char(tty, '\r');
-			tty_put_char(tty, c);
+			tty->ops->write(tty, "\r\n", 2);
 			return 2;
 		}
 		tty->canon_column = tty->column;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d083c73d784a..b33d6688e910 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
  * the other side of the pty/tty pair.
  */
 
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
-		     int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
 	struct tty_struct *to = tty->link;
-	int c;
 
 	if (tty->stopped)
 		return 0;
 
-	/* This isn't locked but our 8K is quite sloppy so no
-	   big deal */
-
-	c = pty_space(to);
-	if (c > count)
-		c = count;
 	if (c > 0) {
 		/* Stuff the data into the input queue of the other end */
 		c = tty_insert_flip_string(to, buf, c);
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 1733d3439ad2..e48af9f79219 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -508,8 +508,9 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
  *	be obtained while the delayed work queue halt ensures that no more
  *	data is fed to the ldisc.
  *
- *	In order to wait for any existing references to complete see
- *	tty_ldisc_wait_idle.
+ *	You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex)
+ *	in order to make sure any currently executing ldisc work is also
+ *	flushed.
  */
 
 static int tty_ldisc_halt(struct tty_struct *tty)
@@ -753,11 +754,14 @@ void tty_ldisc_hangup(struct tty_struct *tty)
 	 *  N_TTY.
 	 */
 	if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
+		/* Make sure the old ldisc is quiescent */
+		tty_ldisc_halt(tty);
+		flush_scheduled_work();
+
 		/* Avoid racing set_ldisc or tty_ldisc_release */
 		mutex_lock(&tty->ldisc_mutex);
 		if (tty->ldisc) {	/* Not yet closed */
 			/* Switch back to N_TTY */
-			tty_ldisc_halt(tty);
 			tty_ldisc_reinit(tty);
 			/* At this point we have a closed ldisc and we want to
 			   reopen it. We could defer this to the next open but
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 2964f5f4a7ef..6b3e0c2f33e2 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -40,6 +40,7 @@ struct sh_cmt_priv {
 	struct platform_device *pdev;
 
 	unsigned long flags;
+	unsigned long flags_suspend;
 	unsigned long match_value;
 	unsigned long next_match_value;
 	unsigned long max_match_value;
@@ -667,11 +668,38 @@ static int __devexit sh_cmt_remove(struct platform_device *pdev)
 	return -EBUSY; /* cannot unregister clockevent and clocksource */
 }
 
+static int sh_cmt_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+	/* save flag state and stop CMT channel */
+	p->flags_suspend = p->flags;
+	sh_cmt_stop(p, p->flags);
+	return 0;
+}
+
+static int sh_cmt_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+	/* start CMT channel from saved state */
+	sh_cmt_start(p, p->flags_suspend);
+	return 0;
+}
+
+static struct dev_pm_ops sh_cmt_dev_pm_ops = {
+	.suspend = sh_cmt_suspend,
+	.resume = sh_cmt_resume,
+};
+
 static struct platform_driver sh_cmt_device_driver = {
 	.probe = sh_cmt_probe,
 	.remove = __devexit_p(sh_cmt_remove),
 	.driver = {
 		.name = "sh_cmt",
+		.pm = &sh_cmt_dev_pm_ops,
 	}
 };
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 33be210d6723..2f631c75f704 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -258,31 +258,6 @@ void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
 EXPORT_SYMBOL(drm_mode_object_find);
 
 /**
- * drm_crtc_from_fb - find the CRTC structure associated with an fb
- * @dev: DRM device
- * @fb: framebuffer in question
- *
- * LOCKING:
- * Caller must hold mode_config lock.
- *
- * Find CRTC in the mode_config structure that matches @fb.
- *
- * RETURNS:
- * Pointer to the CRTC or NULL if it wasn't found.
- */
-struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
-				  struct drm_framebuffer *fb)
-{
-	struct drm_crtc *crtc;
-
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->fb == fb)
-			return crtc;
-	}
-	return NULL;
-}
-
-/**
  * drm_framebuffer_init - initialize a framebuffer
  * @dev: DRM device
  *
@@ -328,11 +303,20 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
 	struct drm_crtc *crtc;
+	struct drm_mode_set set;
+	int ret;
 
 	/* remove from any CRTC */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (crtc->fb == fb)
-			crtc->fb = NULL;
+		if (crtc->fb == fb) {
+			/* should turn off the crtc */
+			memset(&set, 0, sizeof(struct drm_mode_set));
+			set.crtc = crtc;
+			set.fb = NULL;
+			ret = crtc->funcs->set_config(&set);
+			if (ret)
+				DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
+		}
 	}
 
 	drm_mode_object_put(dev, &fb->base);
@@ -1511,7 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	set.mode = mode;
 	set.connectors = connector_set;
 	set.num_connectors = crtc_req->count_connectors;
-	set.fb =fb;
+	set.fb = fb;
 	ret = crtc->funcs->set_config(&set);
 
 out:
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 80cc6d06d61b..7f2728bbc16c 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -502,12 +502,40 @@ static int add_detailed_info(struct drm_connector *connector,
 		struct detailed_non_pixel *data = &timing->data.other_data;
 		struct drm_display_mode *newmode;
 
-		/* EDID up to and including 1.2 may put monitor info here */
-		if (edid->version == 1 && edid->revision < 3)
-			continue;
-
-		/* Detailed mode timing */
-		if (timing->pixel_clock) {
+		/* X server check is version 1.1 or higher */
+		if (edid->version == 1 && edid->revision >= 1 &&
+		    !timing->pixel_clock) {
+			/* Other timing or info */
+			switch (data->type) {
+			case EDID_DETAIL_MONITOR_SERIAL:
+				break;
+			case EDID_DETAIL_MONITOR_STRING:
+				break;
+			case EDID_DETAIL_MONITOR_RANGE:
+				/* Get monitor range data */
+				break;
+			case EDID_DETAIL_MONITOR_NAME:
+				break;
+			case EDID_DETAIL_MONITOR_CPDATA:
+				break;
+			case EDID_DETAIL_STD_MODES:
+				/* Five modes per detailed section */
+				for (j = 0; j < 5; i++) {
+					struct std_timing *std;
+					struct drm_display_mode *newmode;
+
+					std = &data->data.timings[j];
+					newmode = drm_mode_std(dev, std);
+					if (newmode) {
+						drm_mode_probed_add(connector, newmode);
+						modes++;
+					}
+				}
+				break;
+			default:
+				break;
+			}
+		} else {
 			newmode = drm_mode_detailed(dev, edid, timing, quirks);
 			if (!newmode)
 				continue;
@@ -518,38 +546,6 @@ static int add_detailed_info(struct drm_connector *connector,
 			drm_mode_probed_add(connector, newmode);
 
 			modes++;
-			continue;
-		}
-
-		/* Other timing or info */
-		switch (data->type) {
-		case EDID_DETAIL_MONITOR_SERIAL:
-			break;
-		case EDID_DETAIL_MONITOR_STRING:
-			break;
-		case EDID_DETAIL_MONITOR_RANGE:
-			/* Get monitor range data */
-			break;
-		case EDID_DETAIL_MONITOR_NAME:
-			break;
-		case EDID_DETAIL_MONITOR_CPDATA:
-			break;
-		case EDID_DETAIL_STD_MODES:
-			/* Five modes per detailed section */
-			for (j = 0; j < 5; i++) {
-				struct std_timing *std;
-				struct drm_display_mode *newmode;
-
-				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std);
-				if (newmode) {
-					drm_mode_probed_add(connector, newmode);
-					modes++;
-				}
-			}
-			break;
-		default:
-			break;
 		}
 	}
 
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85ec31b3ff00..f7a615b80c70 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -22,44 +22,50 @@
 #define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
 #define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
 
+static struct device_type drm_sysfs_device_minor = {
+	.name = "drm_minor"
+};
+
 /**
- * drm_sysfs_suspend - DRM class suspend hook
+ * drm_class_suspend - DRM class suspend hook
  * @dev: Linux device to suspend
  * @state: power state to enter
  *
  * Just figures out what the actual struct drm_device associated with
  * @dev is and calls its suspend hook, if present.
  */
-static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+static int drm_class_suspend(struct device *dev, pm_message_t state)
 {
-	struct drm_minor *drm_minor = to_drm_minor(dev);
-	struct drm_device *drm_dev = drm_minor->dev;
-
-	if (drm_minor->type == DRM_MINOR_LEGACY &&
-	    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
-	    drm_dev->driver->suspend)
-		return drm_dev->driver->suspend(drm_dev, state);
-
+	if (dev->type == &drm_sysfs_device_minor) {
+		struct drm_minor *drm_minor = to_drm_minor(dev);
+		struct drm_device *drm_dev = drm_minor->dev;
+
+		if (drm_minor->type == DRM_MINOR_LEGACY &&
+		    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+		    drm_dev->driver->suspend)
+			return drm_dev->driver->suspend(drm_dev, state);
+	}
 	return 0;
 }
 
 /**
- * drm_sysfs_resume - DRM class resume hook
+ * drm_class_resume - DRM class resume hook
  * @dev: Linux device to resume
  *
  * Just figures out what the actual struct drm_device associated with
  * @dev is and calls its resume hook, if present.
  */
-static int drm_sysfs_resume(struct device *dev)
+static int drm_class_resume(struct device *dev)
 {
-	struct drm_minor *drm_minor = to_drm_minor(dev);
-	struct drm_device *drm_dev = drm_minor->dev;
-
-	if (drm_minor->type == DRM_MINOR_LEGACY &&
-	    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
-	    drm_dev->driver->resume)
-		return drm_dev->driver->resume(drm_dev);
-
+	if (dev->type == &drm_sysfs_device_minor) {
+		struct drm_minor *drm_minor = to_drm_minor(dev);
+		struct drm_device *drm_dev = drm_minor->dev;
+
+		if (drm_minor->type == DRM_MINOR_LEGACY &&
+		    !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
+		    drm_dev->driver->resume)
+			return drm_dev->driver->resume(drm_dev);
+	}
 	return 0;
 }
 
@@ -99,8 +105,8 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
 		goto err_out;
 	}
 
-	class->suspend = drm_sysfs_suspend;
-	class->resume = drm_sysfs_resume;
+	class->suspend = drm_class_suspend;
+	class->resume = drm_class_resume;
 
 	err = class_create_file(class, &class_attr_version);
 	if (err)
@@ -480,6 +486,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
 	minor->kdev.class = drm_class;
 	minor->kdev.release = drm_sysfs_device_release;
 	minor->kdev.devt = minor->device;
+	minor->kdev.type = &drm_sysfs_device_minor;
 	if (minor->type == DRM_MINOR_CONTROL)
 		minor_str = "controlD%d";
 	else if (minor->type == DRM_MINOR_RENDER)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7537f57d8a87..5b4f87e55621 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -222,6 +222,7 @@ typedef struct drm_i915_private {
 	unsigned int edp_support:1;
 	int lvds_ssc_freq;
 
+	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -384,6 +385,9 @@ typedef struct drm_i915_private {
 	 */
 	struct list_head inactive_list;
 
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
 	/**
 	 * List of breadcrumbs associated with GPU requests currently
 	 * outstanding.
@@ -451,6 +455,9 @@ struct drm_i915_gem_object {
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
 
+	/** This object's place on the fenced object LRU */
+	struct list_head fence_list;
+
 	/**
 	 * This is set if the object is on the active or flushing lists
 	 * (has pending rendering), and is not set if it's on inactive (ready
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 140bee142fc2..0c07a755b3a3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -978,6 +978,7 @@ int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_set_domain *args = data;
 	struct drm_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
@@ -1010,8 +1011,18 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		 obj, obj->size, read_domains, write_domain);
 #endif
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
+		/* Update the LRU on the fence for the CPU access that's
+		 * about to occur.
+		 */
+		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+			list_move_tail(&obj_priv->fence_list,
+				       &dev_priv->mm.fence_list);
+		}
+
 		/* Silently promote "you're not bound, there was nothing to do"
 		 * to success, since the client was just asking us to
 		 * make sure everything was done.
@@ -1155,8 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Need a new fence register? */
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-	    obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (obj_priv->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
@@ -2208,6 +2218,12 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *old_obj_priv = NULL;
 	int i, ret, avail;
 
+	/* Just update our place in the LRU if our fence is getting used. */
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+		return 0;
+	}
+
 	switch (obj_priv->tiling_mode) {
 	case I915_TILING_NONE:
 		WARN(1, "allocating a fence for non-tiled object?\n");
@@ -2229,7 +2245,6 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 	}
 
 	/* First try to find a free reg */
-try_again:
 	avail = 0;
 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
 		reg = &dev_priv->fence_regs[i];
@@ -2243,52 +2258,41 @@ try_again:
 
 	/* None available, try to steal one or wait for a user to finish */
 	if (i == dev_priv->num_fence_regs) {
-		uint32_t seqno = dev_priv->mm.next_gem_seqno;
+		struct drm_gem_object *old_obj = NULL;
 
 		if (avail == 0)
 			return -ENOSPC;
 
-		for (i = dev_priv->fence_reg_start;
-		     i < dev_priv->num_fence_regs; i++) {
-			uint32_t this_seqno;
+		list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
+				    fence_list) {
+			old_obj = old_obj_priv->obj;
 
-			reg = &dev_priv->fence_regs[i];
-			old_obj_priv = reg->obj->driver_private;
+			reg = &dev_priv->fence_regs[old_obj_priv->fence_reg];
 
 			if (old_obj_priv->pin_count)
 				continue;
 
+			/* Take a reference, as otherwise the wait_rendering
+			 * below may cause the object to get freed out from
+			 * under us.
+			 */
+			drm_gem_object_reference(old_obj);
+
 			/* i915 uses fences for GPU access to tiled buffers */
 			if (IS_I965G(dev) || !old_obj_priv->active)
 				break;
 
-			/* find the seqno of the first available fence */
-			this_seqno = old_obj_priv->last_rendering_seqno;
-			if (this_seqno != 0 &&
-			    reg->obj->write_domain == 0 &&
-			    i915_seqno_passed(seqno, this_seqno))
-				seqno = this_seqno;
-		}
-
-		/*
-		 * Now things get ugly... we have to wait for one of the
-		 * objects to finish before trying again.
-		 */
-		if (i == dev_priv->num_fence_regs) {
-			if (seqno == dev_priv->mm.next_gem_seqno) {
-				i915_gem_flush(dev,
-					       I915_GEM_GPU_DOMAINS,
-					       I915_GEM_GPU_DOMAINS);
-				seqno = i915_add_request(dev, NULL,
-							 I915_GEM_GPU_DOMAINS);
-				if (seqno == 0)
-					return -ENOMEM;
-			}
-
-			ret = i915_wait_request(dev, seqno);
-			if (ret)
+			/* This brings the object to the head of the LRU if it
+			 * had been written to.  The only way this should
+			 * result in us waiting longer than the expected
+			 * optimal amount of time is if there was a
+			 * fence-using buffer later that was read-only.
+			 */
+			i915_gem_object_flush_gpu_write_domain(old_obj);
+			ret = i915_gem_object_wait_rendering(old_obj);
+			if (ret != 0)
 				return ret;
-			goto try_again;
+			break;
 		}
 
 		/*
@@ -2296,10 +2300,15 @@ try_again:
 		 * for this object next time we need it.
 		 */
 		i915_gem_release_mmap(reg->obj);
+		i = old_obj_priv->fence_reg;
 		old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
+		list_del_init(&old_obj_priv->fence_list);
+		drm_gem_object_unreference(old_obj);
 	}
 
 	obj_priv->fence_reg = i;
+	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+
 	reg->obj = obj;
 
 	if (IS_I965G(dev))
@@ -2342,6 +2351,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
 	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
+	list_del_init(&obj_priv->fence_list);
 }
 
 /**
@@ -3595,9 +3605,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	 * Pre-965 chips need a fence register set up in order to
 	 * properly handle tiled surfaces.
 	 */
-	if (!IS_I965G(dev) &&
-	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-	    obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
 		ret = i915_gem_object_get_fence_reg(obj);
 		if (ret != 0) {
 			if (ret != -EBUSY && ret != -ERESTARTSYS)
@@ -3806,6 +3814,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	obj_priv->obj = obj;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj_priv->list);
+	INIT_LIST_HEAD(&obj_priv->fence_list);
 
 	return 0;
 }
@@ -4253,6 +4262,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 300aee3296c2..f806fcc54e09 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -59,6 +59,16 @@ find_section(struct bdb_header *bdb, int section_id)
 	return NULL;
 }
 
+static u16
+get_blocksize(void *p)
+{
+	u16 *block_ptr, block_size;
+
+	block_ptr = (u16 *)((char *)p - 2);
+	block_size = *block_ptr;
+	return block_size;
+}
+
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 			struct lvds_dvo_timing *dvo_timing)
@@ -215,6 +225,41 @@ parse_general_features(struct drm_i915_private *dev_priv,
 }
 
 static void
+parse_general_definitions(struct drm_i915_private *dev_priv,
+			  struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *general;
+	const int crt_bus_map_table[] = {
+		GPIOB,
+		GPIOA,
+		GPIOC,
+		GPIOD,
+		GPIOE,
+		GPIOF,
+	};
+
+	/* Set sensible defaults in case we can't find the general block
+	   or it is the wrong chipset */
+	dev_priv->crt_ddc_bus = -1;
+
+	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (general) {
+		u16 block_size = get_blocksize(general);
+		if (block_size >= sizeof(*general)) {
+			int bus_pin = general->crt_ddc_gmbus_pin;
+			DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+			if ((bus_pin >= 1) && (bus_pin <= 6)) {
+				dev_priv->crt_ddc_bus =
+					crt_bus_map_table[bus_pin-1];
+			}
+		} else {
+			DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+				  block_size);
+		}
+	}
+}
+
+static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 			  struct bdb_header *bdb)
 {
@@ -222,7 +267,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 	struct bdb_general_definitions *p_defs;
 	struct child_device_config *p_child;
 	int i, child_device_num, count;
-	u16 block_size, *block_ptr;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
@@ -240,8 +285,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		return;
 	}
 	/* get the block size of general definitions */
-	block_ptr = (u16 *)((char *)p_defs - 2);
-	block_size = *block_ptr;
+	block_size = get_blocksize(p_defs);
 	/* get the number of child device */
 	child_device_num = (block_size - sizeof(*p_defs)) /
 		sizeof(*p_child);
@@ -362,6 +406,7 @@ intel_init_bios(struct drm_device *dev)
 
 	/* Grab useful general definitions */
 	parse_general_features(dev_priv, bdb);
+	parse_general_definitions(dev_priv, bdb);
 	parse_lfp_panel_data(dev_priv, bdb);
 	parse_sdvo_panel_data(dev_priv, bdb);
 	parse_sdvo_device_mapping(dev_priv, bdb);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4cf8e2e88a40..590f81c8f594 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -508,6 +508,7 @@ void intel_crt_init(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	struct intel_output *intel_output;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 i2c_reg;
 
 	intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
@@ -527,8 +528,12 @@ void intel_crt_init(struct drm_device *dev)
 	/* Set up the DDC bus. */
 	if (IS_IGDNG(dev))
 		i2c_reg = PCH_GPIOA;
-	else
+	else {
 		i2c_reg = GPIOA;
+		/* Use VBT information for CRT DDC if available */
+		if (dev_priv->crt_ddc_bus != -1)
+			i2c_reg = dev_priv->crt_ddc_bus;
+	}
 	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
 	if (!intel_output->ddc_bus) {
 		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
@@ -537,6 +542,10 @@ void intel_crt_init(struct drm_device *dev)
 	}
 
 	intel_output->type = INTEL_OUTPUT_ANALOG;
+	intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+				   (1 << INTEL_ANALOG_CLONE_BIT) |
+				   (1 << INTEL_SDVO_LVDS_CLONE_BIT);
+	intel_output->crtc_mask = (1 << 0) | (1 << 1);
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d6fce2133413..3fadb5358858 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -666,7 +666,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	intel_clock_t clock;
 	int err = target;
 
-	if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
 	    (I915_READ(LVDS)) != 0) {
 		/*
 		 * For LVDS, if the panel is on, just rely on its current
@@ -2396,7 +2396,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_sdvo) {
 		dpll |= DPLL_DVO_HIGH_SPEED;
 		sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
-		if (IS_I945G(dev) || IS_I945GM(dev))
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 			dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
 		else if (IS_IGDNG(dev))
 			dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
@@ -3170,7 +3170,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct intel_output *intel_output = to_intel_output(connector);
-		if (type_mask & (1 << intel_output->type))
+		if (type_mask & intel_output->clone_mask)
 			index_mask |= (1 << entry);
 		entry++;
 	}
@@ -3218,30 +3218,30 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_dp_init(dev, PCH_DP_D);
 
 	} else if (IS_I9XX(dev)) {
-		int found;
-		u32 reg;
+		bool found = false;
 
 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
 			found = intel_sdvo_init(dev, SDVOB);
 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
 				intel_hdmi_init(dev, SDVOB);
+
 			if (!found && SUPPORTS_INTEGRATED_DP(dev))
 				intel_dp_init(dev, DP_B);
 		}
 
 		/* Before G4X SDVOC doesn't have its own detect register */
-		if (IS_G4X(dev))
-			reg = SDVOC;
-		else
-			reg = SDVOB;
 
-		if (I915_READ(reg) & SDVO_DETECTED) {
+		if (I915_READ(SDVOB) & SDVO_DETECTED)
 			found = intel_sdvo_init(dev, SDVOC);
-			if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+
+		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
+
+			if (SUPPORTS_INTEGRATED_HDMI(dev))
 				intel_hdmi_init(dev, SDVOC);
-			if (!found && SUPPORTS_INTEGRATED_DP(dev))
+			if (SUPPORTS_INTEGRATED_DP(dev))
 				intel_dp_init(dev, DP_C);
 		}
+
 		if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
 			intel_dp_init(dev, DP_D);
 	} else
@@ -3253,51 +3253,10 @@ static void intel_setup_outputs(struct drm_device *dev)
3253 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 3253 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3254 struct intel_output *intel_output = to_intel_output(connector); 3254 struct intel_output *intel_output = to_intel_output(connector);
3255 struct drm_encoder *encoder = &intel_output->enc; 3255 struct drm_encoder *encoder = &intel_output->enc;
3256 int crtc_mask = 0, clone_mask = 0;
3257 3256
3258 /* valid crtcs */ 3257 encoder->possible_crtcs = intel_output->crtc_mask;
3259 switch(intel_output->type) { 3258 encoder->possible_clones = intel_connector_clones(dev,
3260 case INTEL_OUTPUT_HDMI: 3259 intel_output->clone_mask);
3261 crtc_mask = ((1 << 0)|
3262 (1 << 1));
3263 clone_mask = ((1 << INTEL_OUTPUT_HDMI));
3264 break;
3265 case INTEL_OUTPUT_DVO:
3266 case INTEL_OUTPUT_SDVO:
3267 crtc_mask = ((1 << 0)|
3268 (1 << 1));
3269 clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
3270 (1 << INTEL_OUTPUT_DVO) |
3271 (1 << INTEL_OUTPUT_SDVO));
3272 break;
3273 case INTEL_OUTPUT_ANALOG:
3274 crtc_mask = ((1 << 0)|
3275 (1 << 1));
3276 clone_mask = ((1 << INTEL_OUTPUT_ANALOG) |
3277 (1 << INTEL_OUTPUT_DVO) |
3278 (1 << INTEL_OUTPUT_SDVO));
3279 break;
3280 case INTEL_OUTPUT_LVDS:
3281 crtc_mask = (1 << 1);
3282 clone_mask = (1 << INTEL_OUTPUT_LVDS);
3283 break;
3284 case INTEL_OUTPUT_TVOUT:
3285 crtc_mask = ((1 << 0) |
3286 (1 << 1));
3287 clone_mask = (1 << INTEL_OUTPUT_TVOUT);
3288 break;
3289 case INTEL_OUTPUT_DISPLAYPORT:
3290 crtc_mask = ((1 << 0) |
3291 (1 << 1));
3292 clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
3293 break;
3294 case INTEL_OUTPUT_EDP:
3295 crtc_mask = (1 << 1);
3296 clone_mask = (1 << INTEL_OUTPUT_EDP);
3297 break;
3298 }
3299 encoder->possible_crtcs = crtc_mask;
3300 encoder->possible_clones = intel_connector_clones(dev, clone_mask);
3301 } 3260 }
3302} 3261}
3303 3262
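
Taken together with the intel_connector_clones() hunk above, cloneability after this change reduces to bitmask overlap: two outputs may be driven from the same pipe at the same time exactly when their clone_mask values share a bit. A minimal sketch of that predicate, assuming only the field names introduced in these hunks:

/* Minimal sketch: with the per-output masks, cloneability is mask overlap. */
static bool intel_outputs_cloneable(struct intel_output *a, struct intel_output *b)
{
	return (a->clone_mask & b->clone_mask) != 0;
}
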
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a6ff15ac548a..f2afc4af4bc9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1254,6 +1254,18 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1254 else 1254 else
1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
1256 1256
1257 if (output_reg == DP_B)
1258 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1259 else if (output_reg == DP_C)
1260 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1261 else if (output_reg == DP_D)
1262 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1263
1264 if (IS_eDP(intel_output)) {
1265 intel_output->crtc_mask = (1 << 1);
1266 intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP);
1267 } else
1268 intel_output->crtc_mask = (1 << 0) | (1 << 1);
1257 connector->interlace_allowed = true; 1269 connector->interlace_allowed = true;
1258 connector->doublescan_allowed = 0; 1270 connector->doublescan_allowed = 0;
1259 1271
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d6f92ea1b553..25aa6facc12d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -57,6 +57,24 @@
57#define INTEL_OUTPUT_DISPLAYPORT 7 57#define INTEL_OUTPUT_DISPLAYPORT 7
58#define INTEL_OUTPUT_EDP 8 58#define INTEL_OUTPUT_EDP 8
59 59
60/* Intel Pipe Clone Bit */
61#define INTEL_HDMIB_CLONE_BIT 1
62#define INTEL_HDMIC_CLONE_BIT 2
63#define INTEL_HDMID_CLONE_BIT 3
64#define INTEL_HDMIE_CLONE_BIT 4
65#define INTEL_HDMIF_CLONE_BIT 5
66#define INTEL_SDVO_NON_TV_CLONE_BIT 6
67#define INTEL_SDVO_TV_CLONE_BIT 7
68#define INTEL_SDVO_LVDS_CLONE_BIT 8
69#define INTEL_ANALOG_CLONE_BIT 9
70#define INTEL_TV_CLONE_BIT 10
71#define INTEL_DP_B_CLONE_BIT 11
72#define INTEL_DP_C_CLONE_BIT 12
73#define INTEL_DP_D_CLONE_BIT 13
74#define INTEL_LVDS_CLONE_BIT 14
75#define INTEL_DVO_TMDS_CLONE_BIT 15
76#define INTEL_DVO_LVDS_CLONE_BIT 16
77
60#define INTEL_DVO_CHIP_NONE 0 78#define INTEL_DVO_CHIP_NONE 0
61#define INTEL_DVO_CHIP_LVDS 1 79#define INTEL_DVO_CHIP_LVDS 1
62#define INTEL_DVO_CHIP_TMDS 2 80#define INTEL_DVO_CHIP_TMDS 2
@@ -86,6 +104,8 @@ struct intel_output {
86 bool needs_tv_clock; 104 bool needs_tv_clock;
87 void *dev_priv; 105 void *dev_priv;
88 void (*hot_plug)(struct intel_output *); 106 void (*hot_plug)(struct intel_output *);
107 int crtc_mask;
108 int clone_mask;
89}; 109};
90 110
91struct intel_crtc { 111struct intel_crtc {
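
Every output init routine now fills these two fields once. The LVDS path, for example, ends up with the most restrictive combination (condensed from the intel_lvds.c hunk later in this diff):

/* LVDS: pipe B only, no cloning partners other than itself. */
intel_output->crtc_mask  = (1 << 1);
intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
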
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 13bff20930e8..a4d2606de778 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -435,14 +435,20 @@ void intel_dvo_init(struct drm_device *dev)
435 continue; 435 continue;
436 436
437 intel_output->type = INTEL_OUTPUT_DVO; 437 intel_output->type = INTEL_OUTPUT_DVO;
438 intel_output->crtc_mask = (1 << 0) | (1 << 1);
438 switch (dvo->type) { 439 switch (dvo->type) {
439 case INTEL_DVO_CHIP_TMDS: 440 case INTEL_DVO_CHIP_TMDS:
441 intel_output->clone_mask =
442 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
443 (1 << INTEL_ANALOG_CLONE_BIT);
440 drm_connector_init(dev, connector, 444 drm_connector_init(dev, connector,
441 &intel_dvo_connector_funcs, 445 &intel_dvo_connector_funcs,
442 DRM_MODE_CONNECTOR_DVII); 446 DRM_MODE_CONNECTOR_DVII);
443 encoder_type = DRM_MODE_ENCODER_TMDS; 447 encoder_type = DRM_MODE_ENCODER_TMDS;
444 break; 448 break;
445 case INTEL_DVO_CHIP_LVDS: 449 case INTEL_DVO_CHIP_LVDS:
450 intel_output->clone_mask =
451 (1 << INTEL_DVO_LVDS_CLONE_BIT);
446 drm_connector_init(dev, connector, 452 drm_connector_init(dev, connector,
447 &intel_dvo_connector_funcs, 453 &intel_dvo_connector_funcs,
448 DRM_MODE_CONNECTOR_LVDS); 454 DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1842290cded3..fa304e136010 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -230,22 +230,28 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
230 230
231 connector->interlace_allowed = 0; 231 connector->interlace_allowed = 0;
232 connector->doublescan_allowed = 0; 232 connector->doublescan_allowed = 0;
233 intel_output->crtc_mask = (1 << 0) | (1 << 1);
233 234
234 /* Set up the DDC bus. */ 235 /* Set up the DDC bus. */
235 if (sdvox_reg == SDVOB) 236 if (sdvox_reg == SDVOB) {
237 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
236 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 238 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
237 else if (sdvox_reg == SDVOC) 239 } else if (sdvox_reg == SDVOC) {
240 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
238 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 241 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
239 else if (sdvox_reg == HDMIB) 242 } else if (sdvox_reg == HDMIB) {
243 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
240 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 244 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
241 "HDMIB"); 245 "HDMIB");
242 else if (sdvox_reg == HDMIC) 246 } else if (sdvox_reg == HDMIC) {
247 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
243 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 248 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
244 "HDMIC"); 249 "HDMIC");
245 else if (sdvox_reg == HDMID) 250 } else if (sdvox_reg == HDMID) {
251 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
246 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 252 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
247 "HDMID"); 253 "HDMID");
248 254 }
249 if (!intel_output->ddc_bus) 255 if (!intel_output->ddc_bus)
250 goto err_connector; 256 goto err_connector;
251 257
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3f445a80c552..8df02ef89261 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -916,6 +916,8 @@ void intel_lvds_init(struct drm_device *dev)
916 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 916 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
917 intel_output->type = INTEL_OUTPUT_LVDS; 917 intel_output->type = INTEL_OUTPUT_LVDS;
918 918
919 intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
920 intel_output->crtc_mask = (1 << 1);
919 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 921 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
920 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 922 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
921 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 923 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 5371d9332554..d3b74ba62b4a 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1458,7 +1458,7 @@ intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
1458 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)) 1458 (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
1459 caps++; 1459 caps++;
1460 if (sdvo_priv->caps.output_flags & 1460 if (sdvo_priv->caps.output_flags &
1461 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID0)) 1461 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
1462 caps++; 1462 caps++;
1463 if (sdvo_priv->caps.output_flags & 1463 if (sdvo_priv->caps.output_flags &
1464 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1)) 1464 (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
@@ -1967,6 +1967,9 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
1967 intel_sdvo_set_colorimetry(intel_output, 1967 intel_sdvo_set_colorimetry(intel_output,
1968 SDVO_COLORIMETRY_RGB256); 1968 SDVO_COLORIMETRY_RGB256);
1969 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 1969 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
1970 intel_output->clone_mask =
1971 (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
1972 (1 << INTEL_ANALOG_CLONE_BIT);
1970 } 1973 }
1971 } else if (flags & SDVO_OUTPUT_SVID0) { 1974 } else if (flags & SDVO_OUTPUT_SVID0) {
1972 1975
@@ -1975,11 +1978,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
1975 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 1978 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1976 sdvo_priv->is_tv = true; 1979 sdvo_priv->is_tv = true;
1977 intel_output->needs_tv_clock = true; 1980 intel_output->needs_tv_clock = true;
1981 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
1978 } else if (flags & SDVO_OUTPUT_RGB0) { 1982 } else if (flags & SDVO_OUTPUT_RGB0) {
1979 1983
1980 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 1984 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1981 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 1985 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
1982 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 1986 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
1987 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
1988 (1 << INTEL_ANALOG_CLONE_BIT);
1983 } else if (flags & SDVO_OUTPUT_RGB1) { 1989 } else if (flags & SDVO_OUTPUT_RGB1) {
1984 1990
1985 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 1991 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
@@ -1991,12 +1997,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
1991 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 1997 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
1992 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 1998 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
1993 sdvo_priv->is_lvds = true; 1999 sdvo_priv->is_lvds = true;
2000 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2001 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
1994 } else if (flags & SDVO_OUTPUT_LVDS1) { 2002 } else if (flags & SDVO_OUTPUT_LVDS1) {
1995 2003
1996 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; 2004 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1997 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2005 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
1998 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2006 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
1999 sdvo_priv->is_lvds = true; 2007 sdvo_priv->is_lvds = true;
2008 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2009 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2000 } else { 2010 } else {
2001 2011
2002 unsigned char bytes[2]; 2012 unsigned char bytes[2];
@@ -2009,6 +2019,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2009 bytes[0], bytes[1]); 2019 bytes[0], bytes[1]);
2010 ret = false; 2020 ret = false;
2011 } 2021 }
2022 intel_output->crtc_mask = (1 << 0) | (1 << 1);
2012 2023
2013 if (ret && registered) 2024 if (ret && registered)
2014 ret = drm_sysfs_connector_add(connector) == 0 ? true : false; 2025 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index da4ab4dc1630..2fbe13a0de81 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1718,6 +1718,7 @@ intel_tv_init(struct drm_device *dev)
1718 if (!intel_output) { 1718 if (!intel_output) {
1719 return; 1719 return;
1720 } 1720 }
1721
1721 connector = &intel_output->base; 1722 connector = &intel_output->base;
1722 1723
1723 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1724 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
@@ -1729,6 +1730,7 @@ intel_tv_init(struct drm_device *dev)
1729 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1730 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1730 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1731 tv_priv = (struct intel_tv_priv *)(intel_output + 1);
1731 intel_output->type = INTEL_OUTPUT_TVOUT; 1732 intel_output->type = INTEL_OUTPUT_TVOUT;
1733 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1732 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1734 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1733 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1735 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1734 intel_output->dev_priv = tv_priv; 1736 intel_output->dev_priv = tv_priv;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index f1ba8ff41130..68e728e8be4d 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -254,6 +254,72 @@ void r100_mc_fini(struct radeon_device *rdev)
254 254
255 255
256/* 256/*
257 * Interrupts
258 */
259int r100_irq_set(struct radeon_device *rdev)
260{
261 uint32_t tmp = 0;
262
263 if (rdev->irq.sw_int) {
264 tmp |= RADEON_SW_INT_ENABLE;
265 }
266 if (rdev->irq.crtc_vblank_int[0]) {
267 tmp |= RADEON_CRTC_VBLANK_MASK;
268 }
269 if (rdev->irq.crtc_vblank_int[1]) {
270 tmp |= RADEON_CRTC2_VBLANK_MASK;
271 }
272 WREG32(RADEON_GEN_INT_CNTL, tmp);
273 return 0;
274}
275
276static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
277{
278 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
279 uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
280 RADEON_CRTC2_VBLANK_STAT;
281
282 if (irqs) {
283 WREG32(RADEON_GEN_INT_STATUS, irqs);
284 }
285 return irqs & irq_mask;
286}
287
288int r100_irq_process(struct radeon_device *rdev)
289{
290 uint32_t status;
291
292 status = r100_irq_ack(rdev);
293 if (!status) {
294 return IRQ_NONE;
295 }
296 while (status) {
297 /* SW interrupt */
298 if (status & RADEON_SW_INT_TEST) {
299 radeon_fence_process(rdev);
300 }
301 /* Vertical blank interrupts */
302 if (status & RADEON_CRTC_VBLANK_STAT) {
303 drm_handle_vblank(rdev->ddev, 0);
304 }
305 if (status & RADEON_CRTC2_VBLANK_STAT) {
306 drm_handle_vblank(rdev->ddev, 1);
307 }
308 status = r100_irq_ack(rdev);
309 }
310 return IRQ_HANDLED;
311}
312
313u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
314{
315 if (crtc == 0)
316 return RREG32(RADEON_CRTC_CRNT_FRAME);
317 else
318 return RREG32(RADEON_CRTC2_CRNT_FRAME);
319}
320
321
322/*
257 * Fence emission 323 * Fence emission
258 */ 324 */
259void r100_fence_ring_emit(struct radeon_device *rdev, 325void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1025,6 +1091,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1025 tmp |= tile_flags; 1091 tmp |= tile_flags;
1026 ib[idx] = tmp; 1092 ib[idx] = tmp;
1027 break; 1093 break;
1094 case RADEON_RB3D_ZPASS_ADDR:
1095 r = r100_cs_packet_next_reloc(p, &reloc);
1096 if (r) {
1097 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1098 idx, reg);
1099 r100_cs_dump_packet(p, pkt);
1100 return r;
1101 }
1102 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1103 break;
1028 default: 1104 default:
1029 /* FIXME: we don't want to allow anyothers packet */ 1105 /* FIXME: we don't want to allow anyothers packet */
1030 break; 1106 break;
@@ -1556,26 +1632,6 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1556 r100_pll_errata_after_data(rdev); 1632 r100_pll_errata_after_data(rdev);
1557} 1633}
1558 1634
1559uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
1560{
1561 if (reg < 0x10000)
1562 return readl(((void __iomem *)rdev->rmmio) + reg);
1563 else {
1564 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1565 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1566 }
1567}
1568
1569void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1570{
1571 if (reg < 0x10000)
1572 writel(v, ((void __iomem *)rdev->rmmio) + reg);
1573 else {
1574 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
1575 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
1576 }
1577}
1578
1579int r100_init(struct radeon_device *rdev) 1635int r100_init(struct radeon_device *rdev)
1580{ 1636{
1581 return 0; 1637 return 0;
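
The new IRQ helpers above are reached through the shared KMS interrupt handler in radeon_irq_kms.c; the hunk for that file below only removes the old copies, so here is a sketch of the expected entry path, assuming the handler simply delegates to the per-ASIC hook:

/* Sketch only; the real handler body lives in radeon_irq_kms.c. */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct radeon_device *rdev = dev->dev_private;

	/* dispatches to r100_irq_process() or rs600_irq_process() via the asic table */
	return radeon_irq_process(rdev);
}
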
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 9c8d41534a5d..053f4ec397f7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -83,8 +83,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
83 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); 83 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
84 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 84 (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
85 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); 85 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
86 mb();
87 } 86 }
87 mb();
88} 88}
89 89
90int rv370_pcie_gart_enable(struct radeon_device *rdev) 90int rv370_pcie_gart_enable(struct radeon_device *rdev)
@@ -448,6 +448,7 @@ void r300_gpu_init(struct radeon_device *rdev)
448 /* rv350,rv370,rv380 */ 448 /* rv350,rv370,rv380 */
449 rdev->num_gb_pipes = 1; 449 rdev->num_gb_pipes = 1;
450 } 450 }
451 rdev->num_z_pipes = 1;
451 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); 452 gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
452 switch (rdev->num_gb_pipes) { 453 switch (rdev->num_gb_pipes) {
453 case 2: 454 case 2:
@@ -486,7 +487,8 @@ void r300_gpu_init(struct radeon_device *rdev)
486 printk(KERN_WARNING "Failed to wait MC idle while " 487 printk(KERN_WARNING "Failed to wait MC idle while "
487 "programming pipes. Bad things might happen.\n"); 488 "programming pipes. Bad things might happen.\n");
488 } 489 }
489 DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); 490 DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
491 rdev->num_gb_pipes, rdev->num_z_pipes);
490} 492}
491 493
492int r300_ga_reset(struct radeon_device *rdev) 494int r300_ga_reset(struct radeon_device *rdev)
@@ -593,27 +595,6 @@ void r300_vram_info(struct radeon_device *rdev)
593 595
594 596
595/* 597/*
596 * Indirect registers accessor
597 */
598uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
599{
600 uint32_t r;
601
602 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
603 (void)RREG32(RADEON_PCIE_INDEX);
604 r = RREG32(RADEON_PCIE_DATA);
605 return r;
606}
607
608void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
609{
610 WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
611 (void)RREG32(RADEON_PCIE_INDEX);
612 WREG32(RADEON_PCIE_DATA, (v));
613 (void)RREG32(RADEON_PCIE_DATA);
614}
615
616/*
617 * PCIE Lanes 598 * PCIE Lanes
618 */ 599 */
619 600
@@ -1403,6 +1384,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1403 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; 1384 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
1404 track->textures[i].txdepth = tmp; 1385 track->textures[i].txdepth = tmp;
1405 break; 1386 break;
1387 case R300_ZB_ZPASS_ADDR:
1388 r = r100_cs_packet_next_reloc(p, &reloc);
1389 if (r) {
1390 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1391 idx, reg);
1392 r100_cs_dump_packet(p, pkt);
1393 return r;
1394 }
1395 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
1396 break;
1397 case 0x4be8:
1398 /* valid register only on RV530 */
1399 if (p->rdev->family == CHIP_RV530)
1400 break;
1401 /* fallthrough do not move */
1406 default: 1402 default:
1407 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1403 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1408 reg, idx); 1404 reg, idx);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index dea497a979f2..97426a6f370f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -165,7 +165,18 @@ void r420_pipes_init(struct radeon_device *rdev)
165 printk(KERN_WARNING "Failed to wait GUI idle while " 165 printk(KERN_WARNING "Failed to wait GUI idle while "
166 "programming pipes. Bad things might happen.\n"); 166 "programming pipes. Bad things might happen.\n");
167 } 167 }
168 DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); 168
169 if (rdev->family == CHIP_RV530) {
170 tmp = RREG32(RV530_GB_PIPE_SELECT2);
171 if ((tmp & 3) == 3)
172 rdev->num_z_pipes = 2;
173 else
174 rdev->num_z_pipes = 1;
175 } else
176 rdev->num_z_pipes = 1;
177
178 DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
179 rdev->num_gb_pipes, rdev->num_z_pipes);
169} 180}
170 181
171void r420_gpu_init(struct radeon_device *rdev) 182void r420_gpu_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 036691b38cb7..e1d5e0331e19 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -350,6 +350,7 @@
350#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 350#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
351#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 351#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
352#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c 352#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
353#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
353#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 354#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
354 355
355/* master controls */ 356/* master controls */
@@ -438,14 +439,15 @@
438# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 439# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4
439# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff 440# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff
440 441
441#define R500_DxMODE_INT_MASK 0x6540
442#define R500_D1MODE_INT_MASK (1<<0)
443#define R500_D2MODE_INT_MASK (1<<8)
444
445#define AVIVO_D1MODE_DATA_FORMAT 0x6528 442#define AVIVO_D1MODE_DATA_FORMAT 0x6528
446# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) 443# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0)
447#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C 444#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C
445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
446# define AVIVO_VBLANK_ACK (1 << 4)
448#define AVIVO_D1MODE_VLINE_START_END 0x6538 447#define AVIVO_D1MODE_VLINE_START_END 0x6538
448#define AVIVO_DxMODE_INT_MASK 0x6540
449# define AVIVO_D1MODE_INT_MASK (1 << 0)
450# define AVIVO_D2MODE_INT_MASK (1 << 8)
449#define AVIVO_D1MODE_VIEWPORT_START 0x6580 451#define AVIVO_D1MODE_VIEWPORT_START 0x6580
450#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 452#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584
451#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 453#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588
@@ -475,6 +477,7 @@
475#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 477#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884
476#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 478#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888
477#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c 479#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c
480#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4
478#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 481#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4
479 482
480#define AVIVO_D2GRPH_ENABLE 0x6900 483#define AVIVO_D2GRPH_ENABLE 0x6900
@@ -497,6 +500,7 @@
497#define AVIVO_D2CUR_SIZE 0x6c10 500#define AVIVO_D2CUR_SIZE 0x6c10
498#define AVIVO_D2CUR_POSITION 0x6c14 501#define AVIVO_D2CUR_POSITION 0x6c14
499 502
503#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
500#define AVIVO_D2MODE_VLINE_START_END 0x6d38 504#define AVIVO_D2MODE_VLINE_START_END 0x6d38
501#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 505#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
502#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 506#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
@@ -748,4 +752,8 @@
748# define AVIVO_I2C_EN (1 << 0) 752# define AVIVO_I2C_EN (1 << 0)
749# define AVIVO_I2C_RESET (1 << 8) 753# define AVIVO_I2C_RESET (1 << 8)
750 754
755#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
756# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
757# define AVIVO_D2_VBLANK_INTERRUPT (1 << 5)
758
751#endif 759#endif
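
The new AVIVO interrupt defines fit together as status-then-ack: DISP_INTERRUPT_STATUS reports which CRTC raised a vblank, and writing the ACK bit into the matching DxMODE_VBLANK_STATUS register clears it. A plausible sketch of that acknowledge step (illustrative only; the real handler is rs600_irq_process() in rs600.c, which is not shown in this section):

/* Illustrative acknowledge sequence built from the defines above. */
static u32 avivo_vblank_ack_sketch(struct radeon_device *rdev)
{
	u32 disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);

	if (disp_int & AVIVO_D1_VBLANK_INTERRUPT)
		WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
	if (disp_int & AVIVO_D2_VBLANK_INTERRUPT)
		WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
	return disp_int;
}
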
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 09fb0b6ec7dd..ebd6b0f7bdff 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -177,7 +177,6 @@ void r520_gpu_init(struct radeon_device *rdev)
177 */ 177 */
178 /* workaround for RV530 */ 178 /* workaround for RV530 */
179 if (rdev->family == CHIP_RV530) { 179 if (rdev->family == CHIP_RV530) {
180 WREG32(0x4124, 1);
181 WREG32(0x4128, 0xFF); 180 WREG32(0x4128, 0xFF);
182 } 181 }
183 r420_pipes_init(rdev); 182 r420_pipes_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1d945b8ed6c..b519fb2fecbb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
242 uint64_t *gpu_addr); 242 uint64_t *gpu_addr);
243void radeon_object_unpin(struct radeon_object *robj); 243void radeon_object_unpin(struct radeon_object *robj);
244int radeon_object_wait(struct radeon_object *robj); 244int radeon_object_wait(struct radeon_object *robj);
245int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
245int radeon_object_evict_vram(struct radeon_device *rdev); 246int radeon_object_evict_vram(struct radeon_device *rdev);
246int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); 247int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
247void radeon_object_force_delete(struct radeon_device *rdev); 248void radeon_object_force_delete(struct radeon_device *rdev);
@@ -574,6 +575,7 @@ struct radeon_asic {
574 void (*ring_start)(struct radeon_device *rdev); 575 void (*ring_start)(struct radeon_device *rdev);
575 int (*irq_set)(struct radeon_device *rdev); 576 int (*irq_set)(struct radeon_device *rdev);
576 int (*irq_process)(struct radeon_device *rdev); 577 int (*irq_process)(struct radeon_device *rdev);
578 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
577 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); 579 void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
578 int (*cs_parse)(struct radeon_cs_parser *p); 580 int (*cs_parse)(struct radeon_cs_parser *p);
579 int (*copy_blit)(struct radeon_device *rdev, 581 int (*copy_blit)(struct radeon_device *rdev,
@@ -653,6 +655,7 @@ struct radeon_device {
653 int usec_timeout; 655 int usec_timeout;
654 enum radeon_pll_errata pll_errata; 656 enum radeon_pll_errata pll_errata;
655 int num_gb_pipes; 657 int num_gb_pipes;
658 int num_z_pipes;
656 int disp_priority; 659 int disp_priority;
657 /* BIOS */ 660 /* BIOS */
658 uint8_t *bios; 661 uint8_t *bios;
@@ -666,14 +669,11 @@ struct radeon_device {
666 resource_size_t rmmio_base; 669 resource_size_t rmmio_base;
667 resource_size_t rmmio_size; 670 resource_size_t rmmio_size;
668 void *rmmio; 671 void *rmmio;
669 radeon_rreg_t mm_rreg;
670 radeon_wreg_t mm_wreg;
671 radeon_rreg_t mc_rreg; 672 radeon_rreg_t mc_rreg;
672 radeon_wreg_t mc_wreg; 673 radeon_wreg_t mc_wreg;
673 radeon_rreg_t pll_rreg; 674 radeon_rreg_t pll_rreg;
674 radeon_wreg_t pll_wreg; 675 radeon_wreg_t pll_wreg;
675 radeon_rreg_t pcie_rreg; 676 uint32_t pcie_reg_mask;
676 radeon_wreg_t pcie_wreg;
677 radeon_rreg_t pciep_rreg; 677 radeon_rreg_t pciep_rreg;
678 radeon_wreg_t pciep_wreg; 678 radeon_wreg_t pciep_wreg;
679 struct radeon_clock clock; 679 struct radeon_clock clock;
@@ -705,22 +705,42 @@ int radeon_device_init(struct radeon_device *rdev,
705void radeon_device_fini(struct radeon_device *rdev); 705void radeon_device_fini(struct radeon_device *rdev);
706int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 706int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
707 707
708static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
709{
710 if (reg < 0x10000)
711 return readl(((void __iomem *)rdev->rmmio) + reg);
712 else {
713 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
714 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
715 }
716}
717
718static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
719{
720 if (reg < 0x10000)
721 writel(v, ((void __iomem *)rdev->rmmio) + reg);
722 else {
723 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
724 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
725 }
726}
727
708 728
709/* 729/*
710 * Registers read & write functions. 730 * Registers read & write functions.
711 */ 731 */
712#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) 732#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
713#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) 733#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
714#define RREG32(reg) rdev->mm_rreg(rdev, (reg)) 734#define RREG32(reg) r100_mm_rreg(rdev, (reg))
715#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v)) 735#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
716#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 736#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
717#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 737#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
718#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 738#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
719#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) 739#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
720#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) 740#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
721#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) 741#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
722#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg)) 742#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
723#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v)) 743#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
724#define WREG32_P(reg, val, mask) \ 744#define WREG32_P(reg, val, mask) \
725 do { \ 745 do { \
726 uint32_t tmp_ = RREG32(reg); \ 746 uint32_t tmp_ = RREG32(reg); \
@@ -736,6 +756,24 @@ int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
736 WREG32_PLL(reg, tmp_); \ 756 WREG32_PLL(reg, tmp_); \
737 } while (0) 757 } while (0)
738 758
759/*
760 * Indirect registers accessor
761 */
762static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
763{
764 uint32_t r;
765
766 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
767 r = RREG32(RADEON_PCIE_DATA);
768 return r;
769}
770
771static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
772{
773 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
774 WREG32(RADEON_PCIE_DATA, (v));
775}
776
739void r100_pll_errata_after_index(struct radeon_device *rdev); 777void r100_pll_errata_after_index(struct radeon_device *rdev);
740 778
741 779
@@ -862,6 +900,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
862#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) 900#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
863#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) 901#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
864#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) 902#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
903#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
865#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) 904#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
866#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) 905#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
867#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) 906#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
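
With the MMIO and PCIE accessors turned into static inlines, the only per-family difference left on the PCIE indirect path is the index width, captured by the new pcie_reg_mask field (programmed in the radeon_device.c hunk below: 0xff before RV515, 0x7ff from RV515 on). A read such as RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL) therefore boils down to roughly:

/* Illustrative expansion of RREG32_PCIE(reg) after this change. */
WREG32(RADEON_PCIE_INDEX, reg & rdev->pcie_reg_mask);  /* select the indirect register */
val = RREG32(RADEON_PCIE_DATA);                        /* read its value back */
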
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 9a75876e0c3b..7ca6c13569b5 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -49,6 +49,7 @@ void r100_vram_info(struct radeon_device *rdev);
49int r100_gpu_reset(struct radeon_device *rdev); 49int r100_gpu_reset(struct radeon_device *rdev);
50int r100_mc_init(struct radeon_device *rdev); 50int r100_mc_init(struct radeon_device *rdev);
51void r100_mc_fini(struct radeon_device *rdev); 51void r100_mc_fini(struct radeon_device *rdev);
52u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
52int r100_wb_init(struct radeon_device *rdev); 53int r100_wb_init(struct radeon_device *rdev);
53void r100_wb_fini(struct radeon_device *rdev); 54void r100_wb_fini(struct radeon_device *rdev);
54int r100_gart_enable(struct radeon_device *rdev); 55int r100_gart_enable(struct radeon_device *rdev);
@@ -96,6 +97,7 @@ static struct radeon_asic r100_asic = {
96 .ring_start = &r100_ring_start, 97 .ring_start = &r100_ring_start,
97 .irq_set = &r100_irq_set, 98 .irq_set = &r100_irq_set,
98 .irq_process = &r100_irq_process, 99 .irq_process = &r100_irq_process,
100 .get_vblank_counter = &r100_get_vblank_counter,
99 .fence_ring_emit = &r100_fence_ring_emit, 101 .fence_ring_emit = &r100_fence_ring_emit,
100 .cs_parse = &r100_cs_parse, 102 .cs_parse = &r100_cs_parse,
101 .copy_blit = &r100_copy_blit, 103 .copy_blit = &r100_copy_blit,
@@ -156,6 +158,7 @@ static struct radeon_asic r300_asic = {
156 .ring_start = &r300_ring_start, 158 .ring_start = &r300_ring_start,
157 .irq_set = &r100_irq_set, 159 .irq_set = &r100_irq_set,
158 .irq_process = &r100_irq_process, 160 .irq_process = &r100_irq_process,
161 .get_vblank_counter = &r100_get_vblank_counter,
159 .fence_ring_emit = &r300_fence_ring_emit, 162 .fence_ring_emit = &r300_fence_ring_emit,
160 .cs_parse = &r300_cs_parse, 163 .cs_parse = &r300_cs_parse,
161 .copy_blit = &r100_copy_blit, 164 .copy_blit = &r100_copy_blit,
@@ -196,6 +199,7 @@ static struct radeon_asic r420_asic = {
196 .ring_start = &r300_ring_start, 199 .ring_start = &r300_ring_start,
197 .irq_set = &r100_irq_set, 200 .irq_set = &r100_irq_set,
198 .irq_process = &r100_irq_process, 201 .irq_process = &r100_irq_process,
202 .get_vblank_counter = &r100_get_vblank_counter,
199 .fence_ring_emit = &r300_fence_ring_emit, 203 .fence_ring_emit = &r300_fence_ring_emit,
200 .cs_parse = &r300_cs_parse, 204 .cs_parse = &r300_cs_parse,
201 .copy_blit = &r100_copy_blit, 205 .copy_blit = &r100_copy_blit,
@@ -243,6 +247,7 @@ static struct radeon_asic rs400_asic = {
243 .ring_start = &r300_ring_start, 247 .ring_start = &r300_ring_start,
244 .irq_set = &r100_irq_set, 248 .irq_set = &r100_irq_set,
245 .irq_process = &r100_irq_process, 249 .irq_process = &r100_irq_process,
250 .get_vblank_counter = &r100_get_vblank_counter,
246 .fence_ring_emit = &r300_fence_ring_emit, 251 .fence_ring_emit = &r300_fence_ring_emit,
247 .cs_parse = &r300_cs_parse, 252 .cs_parse = &r300_cs_parse,
248 .copy_blit = &r100_copy_blit, 253 .copy_blit = &r100_copy_blit,
@@ -266,6 +271,8 @@ void rs600_vram_info(struct radeon_device *rdev);
266int rs600_mc_init(struct radeon_device *rdev); 271int rs600_mc_init(struct radeon_device *rdev);
267void rs600_mc_fini(struct radeon_device *rdev); 272void rs600_mc_fini(struct radeon_device *rdev);
268int rs600_irq_set(struct radeon_device *rdev); 273int rs600_irq_set(struct radeon_device *rdev);
274int rs600_irq_process(struct radeon_device *rdev);
275u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
269int rs600_gart_enable(struct radeon_device *rdev); 276int rs600_gart_enable(struct radeon_device *rdev);
270void rs600_gart_disable(struct radeon_device *rdev); 277void rs600_gart_disable(struct radeon_device *rdev);
271void rs600_gart_tlb_flush(struct radeon_device *rdev); 278void rs600_gart_tlb_flush(struct radeon_device *rdev);
@@ -291,7 +298,8 @@ static struct radeon_asic rs600_asic = {
291 .cp_disable = &r100_cp_disable, 298 .cp_disable = &r100_cp_disable,
292 .ring_start = &r300_ring_start, 299 .ring_start = &r300_ring_start,
293 .irq_set = &rs600_irq_set, 300 .irq_set = &rs600_irq_set,
294 .irq_process = &r100_irq_process, 301 .irq_process = &rs600_irq_process,
302 .get_vblank_counter = &rs600_get_vblank_counter,
295 .fence_ring_emit = &r300_fence_ring_emit, 303 .fence_ring_emit = &r300_fence_ring_emit,
296 .cs_parse = &r300_cs_parse, 304 .cs_parse = &r300_cs_parse,
297 .copy_blit = &r100_copy_blit, 305 .copy_blit = &r100_copy_blit,
@@ -308,6 +316,7 @@ static struct radeon_asic rs600_asic = {
308/* 316/*
309 * rs690,rs740 317 * rs690,rs740
310 */ 318 */
319int rs690_init(struct radeon_device *rdev);
311void rs690_errata(struct radeon_device *rdev); 320void rs690_errata(struct radeon_device *rdev);
312void rs690_vram_info(struct radeon_device *rdev); 321void rs690_vram_info(struct radeon_device *rdev);
313int rs690_mc_init(struct radeon_device *rdev); 322int rs690_mc_init(struct radeon_device *rdev);
@@ -316,7 +325,7 @@ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
316void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 325void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
317void rs690_bandwidth_update(struct radeon_device *rdev); 326void rs690_bandwidth_update(struct radeon_device *rdev);
318static struct radeon_asic rs690_asic = { 327static struct radeon_asic rs690_asic = {
319 .init = &r300_init, 328 .init = &rs690_init,
320 .errata = &rs690_errata, 329 .errata = &rs690_errata,
321 .vram_info = &rs690_vram_info, 330 .vram_info = &rs690_vram_info,
322 .gpu_reset = &r300_gpu_reset, 331 .gpu_reset = &r300_gpu_reset,
@@ -333,7 +342,8 @@ static struct radeon_asic rs690_asic = {
333 .cp_disable = &r100_cp_disable, 342 .cp_disable = &r100_cp_disable,
334 .ring_start = &r300_ring_start, 343 .ring_start = &r300_ring_start,
335 .irq_set = &rs600_irq_set, 344 .irq_set = &rs600_irq_set,
336 .irq_process = &r100_irq_process, 345 .irq_process = &rs600_irq_process,
346 .get_vblank_counter = &rs600_get_vblank_counter,
337 .fence_ring_emit = &r300_fence_ring_emit, 347 .fence_ring_emit = &r300_fence_ring_emit,
338 .cs_parse = &r300_cs_parse, 348 .cs_parse = &r300_cs_parse,
339 .copy_blit = &r100_copy_blit, 349 .copy_blit = &r100_copy_blit,
@@ -381,8 +391,9 @@ static struct radeon_asic rv515_asic = {
381 .cp_fini = &r100_cp_fini, 391 .cp_fini = &r100_cp_fini,
382 .cp_disable = &r100_cp_disable, 392 .cp_disable = &r100_cp_disable,
383 .ring_start = &rv515_ring_start, 393 .ring_start = &rv515_ring_start,
384 .irq_set = &r100_irq_set, 394 .irq_set = &rs600_irq_set,
385 .irq_process = &r100_irq_process, 395 .irq_process = &rs600_irq_process,
396 .get_vblank_counter = &rs600_get_vblank_counter,
386 .fence_ring_emit = &r300_fence_ring_emit, 397 .fence_ring_emit = &r300_fence_ring_emit,
387 .cs_parse = &r300_cs_parse, 398 .cs_parse = &r300_cs_parse,
388 .copy_blit = &r100_copy_blit, 399 .copy_blit = &r100_copy_blit,
@@ -423,8 +434,9 @@ static struct radeon_asic r520_asic = {
423 .cp_fini = &r100_cp_fini, 434 .cp_fini = &r100_cp_fini,
424 .cp_disable = &r100_cp_disable, 435 .cp_disable = &r100_cp_disable,
425 .ring_start = &rv515_ring_start, 436 .ring_start = &rv515_ring_start,
426 .irq_set = &r100_irq_set, 437 .irq_set = &rs600_irq_set,
427 .irq_process = &r100_irq_process, 438 .irq_process = &rs600_irq_process,
439 .get_vblank_counter = &rs600_get_vblank_counter,
428 .fence_ring_emit = &r300_fence_ring_emit, 440 .fence_ring_emit = &r300_fence_ring_emit,
429 .cs_parse = &r300_cs_parse, 441 .cs_parse = &r300_cs_parse,
430 .copy_blit = &r100_copy_blit, 442 .copy_blit = &r100_copy_blit,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index afc4db280b94..2a027e00762a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -685,23 +685,15 @@ static const uint32_t default_tvdac_adj[CHIP_LAST] = {
685 0x00780000, /* rs480 */ 685 0x00780000, /* rs480 */
686}; 686};
687 687
688static struct radeon_encoder_tv_dac 688static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev,
689 *radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev) 689 struct radeon_encoder_tv_dac *tv_dac)
690{ 690{
691 struct radeon_encoder_tv_dac *tv_dac = NULL;
692
693 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
694
695 if (!tv_dac)
696 return NULL;
697
698 tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; 691 tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family];
699 if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) 692 if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250))
700 tv_dac->ps2_tvdac_adj = 0x00880000; 693 tv_dac->ps2_tvdac_adj = 0x00880000;
701 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 694 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
702 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 695 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
703 696 return;
704 return tv_dac;
705} 697}
706 698
707struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct 699struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
@@ -713,19 +705,18 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
713 uint16_t dac_info; 705 uint16_t dac_info;
714 uint8_t rev, bg, dac; 706 uint8_t rev, bg, dac;
715 struct radeon_encoder_tv_dac *tv_dac = NULL; 707 struct radeon_encoder_tv_dac *tv_dac = NULL;
708 int found = 0;
709
710 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
711 if (!tv_dac)
712 return NULL;
716 713
717 if (rdev->bios == NULL) 714 if (rdev->bios == NULL)
718 return radeon_legacy_get_tv_dac_info_from_table(rdev); 715 goto out;
719 716
720 /* first check TV table */ 717 /* first check TV table */
721 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); 718 dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
722 if (dac_info) { 719 if (dac_info) {
723 tv_dac =
724 kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
725
726 if (!tv_dac)
727 return NULL;
728
729 rev = RBIOS8(dac_info + 0x3); 720 rev = RBIOS8(dac_info + 0x3);
730 if (rev > 4) { 721 if (rev > 4) {
731 bg = RBIOS8(dac_info + 0xc) & 0xf; 722 bg = RBIOS8(dac_info + 0xc) & 0xf;
@@ -739,6 +730,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
739 bg = RBIOS8(dac_info + 0x10) & 0xf; 730 bg = RBIOS8(dac_info + 0x10) & 0xf;
740 dac = RBIOS8(dac_info + 0x11) & 0xf; 731 dac = RBIOS8(dac_info + 0x11) & 0xf;
741 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 732 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
733 found = 1;
742 } else if (rev > 1) { 734 } else if (rev > 1) {
743 bg = RBIOS8(dac_info + 0xc) & 0xf; 735 bg = RBIOS8(dac_info + 0xc) & 0xf;
744 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; 736 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -751,22 +743,15 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
751 bg = RBIOS8(dac_info + 0xe) & 0xf; 743 bg = RBIOS8(dac_info + 0xe) & 0xf;
752 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; 744 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
753 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 745 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
746 found = 1;
754 } 747 }
755
756 tv_dac->tv_std = radeon_combios_get_tv_info(encoder); 748 tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
757 749 }
758 } else { 750 if (!found) {
759 /* then check CRT table */ 751 /* then check CRT table */
760 dac_info = 752 dac_info =
761 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 753 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
762 if (dac_info) { 754 if (dac_info) {
763 tv_dac =
764 kzalloc(sizeof(struct radeon_encoder_tv_dac),
765 GFP_KERNEL);
766
767 if (!tv_dac)
768 return NULL;
769
770 rev = RBIOS8(dac_info) & 0x3; 755 rev = RBIOS8(dac_info) & 0x3;
771 if (rev < 2) { 756 if (rev < 2) {
772 bg = RBIOS8(dac_info + 0x3) & 0xf; 757 bg = RBIOS8(dac_info + 0x3) & 0xf;
@@ -775,6 +760,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
775 (bg << 16) | (dac << 20); 760 (bg << 16) | (dac << 20);
776 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 761 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
777 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 762 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
763 found = 1;
778 } else { 764 } else {
779 bg = RBIOS8(dac_info + 0x4) & 0xf; 765 bg = RBIOS8(dac_info + 0x4) & 0xf;
780 dac = RBIOS8(dac_info + 0x5) & 0xf; 766 dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -782,13 +768,17 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
782 (bg << 16) | (dac << 20); 768 (bg << 16) | (dac << 20);
783 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 769 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
784 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 770 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
771 found = 1;
785 } 772 }
786 } else { 773 } else {
787 DRM_INFO("No TV DAC info found in BIOS\n"); 774 DRM_INFO("No TV DAC info found in BIOS\n");
788 return radeon_legacy_get_tv_dac_info_from_table(rdev);
789 } 775 }
790 } 776 }
791 777
778out:
779 if (!found) /* fallback to defaults */
780 radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
781
792 return tv_dac; 782 return tv_dac;
793} 783}
794 784
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index d8356827ef17..7a52c461145c 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -406,6 +406,15 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
406{ 406{
407 uint32_t gb_tile_config, gb_pipe_sel = 0; 407 uint32_t gb_tile_config, gb_pipe_sel = 0;
408 408
409 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
410 uint32_t z_pipe_sel = RADEON_READ(RV530_GB_PIPE_SELECT2);
411 if ((z_pipe_sel & 3) == 3)
412 dev_priv->num_z_pipes = 2;
413 else
414 dev_priv->num_z_pipes = 1;
415 } else
416 dev_priv->num_z_pipes = 1;
417
409 /* RS4xx/RS6xx/R4xx/R5xx */ 418 /* RS4xx/RS6xx/R4xx/R5xx */
410 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { 419 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
411 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); 420 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9ff6dcb97f9d..7693f7c67bd3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -225,25 +225,18 @@ void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
225 225
226void radeon_register_accessor_init(struct radeon_device *rdev) 226void radeon_register_accessor_init(struct radeon_device *rdev)
227{ 227{
228 rdev->mm_rreg = &r100_mm_rreg;
229 rdev->mm_wreg = &r100_mm_wreg;
230 rdev->mc_rreg = &radeon_invalid_rreg; 228 rdev->mc_rreg = &radeon_invalid_rreg;
231 rdev->mc_wreg = &radeon_invalid_wreg; 229 rdev->mc_wreg = &radeon_invalid_wreg;
232 rdev->pll_rreg = &radeon_invalid_rreg; 230 rdev->pll_rreg = &radeon_invalid_rreg;
233 rdev->pll_wreg = &radeon_invalid_wreg; 231 rdev->pll_wreg = &radeon_invalid_wreg;
234 rdev->pcie_rreg = &radeon_invalid_rreg;
235 rdev->pcie_wreg = &radeon_invalid_wreg;
236 rdev->pciep_rreg = &radeon_invalid_rreg; 232 rdev->pciep_rreg = &radeon_invalid_rreg;
237 rdev->pciep_wreg = &radeon_invalid_wreg; 233 rdev->pciep_wreg = &radeon_invalid_wreg;
238 234
239 /* Don't change order as we are overridding accessor. */ 235 /* Don't change order as we are overridding accessor. */
240 if (rdev->family < CHIP_RV515) { 236 if (rdev->family < CHIP_RV515) {
241 rdev->pcie_rreg = &rv370_pcie_rreg; 237 rdev->pcie_reg_mask = 0xff;
242 rdev->pcie_wreg = &rv370_pcie_wreg; 238 } else {
243 } 239 rdev->pcie_reg_mask = 0x7ff;
244 if (rdev->family >= CHIP_RV515) {
245 rdev->pcie_rreg = &rv515_pcie_rreg;
246 rdev->pcie_wreg = &rv515_pcie_wreg;
247 } 240 }
248 /* FIXME: not sure here */ 241 /* FIXME: not sure here */
249 if (rdev->family <= CHIP_R580) { 242 if (rdev->family <= CHIP_R580) {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 3933f8216a34..6fa32dac4e97 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -100,9 +100,10 @@
100 * 1.28- Add support for VBL on CRTC2 100 * 1.28- Add support for VBL on CRTC2
101 * 1.29- R500 3D cmd buffer support 101 * 1.29- R500 3D cmd buffer support
102 * 1.30- Add support for occlusion queries 102 * 1.30- Add support for occlusion queries
103 * 1.31- Add support for num Z pipes from GET_PARAM
103 */ 104 */
104#define DRIVER_MAJOR 1 105#define DRIVER_MAJOR 1
105#define DRIVER_MINOR 30 106#define DRIVER_MINOR 31
106#define DRIVER_PATCHLEVEL 0 107#define DRIVER_PATCHLEVEL 0
107 108
108/* 109/*
@@ -329,6 +330,7 @@ typedef struct drm_radeon_private {
329 resource_size_t fb_aper_offset; 330 resource_size_t fb_aper_offset;
330 331
331 int num_gb_pipes; 332 int num_gb_pipes;
333 int num_z_pipes;
332 int track_flush; 334 int track_flush;
333 drm_local_map_t *mmio; 335 drm_local_map_t *mmio;
334 336
@@ -689,6 +691,7 @@ extern void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pciga
689 691
690/* pipe config regs */ 692/* pipe config regs */
691#define R400_GB_PIPE_SELECT 0x402c 693#define R400_GB_PIPE_SELECT 0x402c
694#define RV530_GB_PIPE_SELECT2 0x4124
692#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */ 695#define R500_DYN_SCLK_PWMEM_PIPE 0x000d /* PLL */
693#define R300_GB_TILE_CONFIG 0x4018 696#define R300_GB_TILE_CONFIG 0x4018
694# define R300_ENABLE_TILING (1 << 0) 697# define R300_ENABLE_TILING (1 << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3206c0ad7b6c..ec383edf5f38 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -574,6 +574,8 @@ int radeonfb_create(struct radeon_device *rdev,
574 goto out_unref; 574 goto out_unref;
575 } 575 }
576 576
577 memset_io(fbptr, 0, aligned_size);
578
577 strcpy(info->fix.id, "radeondrmfb"); 579 strcpy(info->fix.id, "radeondrmfb");
578 info->fix.type = FB_TYPE_PACKED_PIXELS; 580 info->fix.type = FB_TYPE_PACKED_PIXELS;
579 info->fix.visual = FB_VISUAL_TRUECOLOR; 581 info->fix.visual = FB_VISUAL_TRUECOLOR;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index cded5180c752..d880edf254db 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -262,8 +262,34 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
262int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 262int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
263 struct drm_file *filp) 263 struct drm_file *filp)
264{ 264{
265 /* FIXME: implement */ 265 struct drm_radeon_gem_busy *args = data;
266 return 0; 266 struct drm_gem_object *gobj;
267 struct radeon_object *robj;
268 int r;
269 uint32_t cur_placement;
270
271 gobj = drm_gem_object_lookup(dev, filp, args->handle);
272 if (gobj == NULL) {
273 return -EINVAL;
274 }
275 robj = gobj->driver_private;
276 r = radeon_object_busy_domain(robj, &cur_placement);
277 switch (cur_placement) {
278 case TTM_PL_VRAM:
279 args->domain = RADEON_GEM_DOMAIN_VRAM;
280 break;
281 case TTM_PL_TT:
282 args->domain = RADEON_GEM_DOMAIN_GTT;
283 break;
284 case TTM_PL_SYSTEM:
285 args->domain = RADEON_GEM_DOMAIN_CPU;
286 default:
287 break;
288 }
289 mutex_lock(&dev->struct_mutex);
290 drm_gem_object_unreference(gobj);
291 mutex_unlock(&dev->struct_mutex);
292 return r;
267} 293}
268 294
269int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, 295int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 491d569deb0e..9805e4b6ca1b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,60 +32,6 @@
32#include "radeon.h" 32#include "radeon.h"
33#include "atom.h" 33#include "atom.h"
34 34
35static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
36{
37 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
38 uint32_t irq_mask = RADEON_SW_INT_TEST;
39
40 if (irqs) {
41 WREG32(RADEON_GEN_INT_STATUS, irqs);
42 }
43 return irqs & irq_mask;
44}
45
46int r100_irq_set(struct radeon_device *rdev)
47{
48 uint32_t tmp = 0;
49
50 if (rdev->irq.sw_int) {
51 tmp |= RADEON_SW_INT_ENABLE;
52 }
53 /* Todo go through CRTC and enable vblank int or not */
54 WREG32(RADEON_GEN_INT_CNTL, tmp);
55 return 0;
56}
57
58int r100_irq_process(struct radeon_device *rdev)
59{
60 uint32_t status;
61
62 status = r100_irq_ack(rdev);
63 if (!status) {
64 return IRQ_NONE;
65 }
66 while (status) {
67 /* SW interrupt */
68 if (status & RADEON_SW_INT_TEST) {
69 radeon_fence_process(rdev);
70 }
71 status = r100_irq_ack(rdev);
72 }
73 return IRQ_HANDLED;
74}
75
76int rs600_irq_set(struct radeon_device *rdev)
77{
78 uint32_t tmp = 0;
79
80 if (rdev->irq.sw_int) {
81 tmp |= RADEON_SW_INT_ENABLE;
82 }
83 WREG32(RADEON_GEN_INT_CNTL, tmp);
84 /* Todo go through CRTC and enable vblank int or not */
85 WREG32(R500_DxMODE_INT_MASK, 0);
86 return 0;
87}
88
89irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) 35irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
90{ 36{
91 struct drm_device *dev = (struct drm_device *) arg; 37 struct drm_device *dev = (struct drm_device *) arg;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 3357110e30ce..dce09ada32bc 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -95,6 +95,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
95 case RADEON_INFO_NUM_GB_PIPES: 95 case RADEON_INFO_NUM_GB_PIPES:
96 value = rdev->num_gb_pipes; 96 value = rdev->num_gb_pipes;
97 break; 97 break;
98 case RADEON_INFO_NUM_Z_PIPES:
99 value = rdev->num_z_pipes;
100 break;
98 default: 101 default:
99 DRM_DEBUG("Invalid request %d\n", info->request); 102 DRM_DEBUG("Invalid request %d\n", info->request);
100 return -EINVAL; 103 return -EINVAL;
@@ -141,19 +144,42 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
141 */ 144 */
142u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) 145u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
143{ 146{
144 /* FIXME: implement */ 147 struct radeon_device *rdev = dev->dev_private;
145 return 0; 148
149 if (crtc < 0 || crtc > 1) {
150 DRM_ERROR("Invalid crtc %d\n", crtc);
151 return -EINVAL;
152 }
153
154 return radeon_get_vblank_counter(rdev, crtc);
146} 155}
147 156
148int radeon_enable_vblank_kms(struct drm_device *dev, int crtc) 157int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
149{ 158{
150 /* FIXME: implement */ 159 struct radeon_device *rdev = dev->dev_private;
151 return 0; 160
161 if (crtc < 0 || crtc > 1) {
162 DRM_ERROR("Invalid crtc %d\n", crtc);
163 return -EINVAL;
164 }
165
166 rdev->irq.crtc_vblank_int[crtc] = true;
167
168 return radeon_irq_set(rdev);
152} 169}
153 170
154void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) 171void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
155{ 172{
156 /* FIXME: implement */ 173 struct radeon_device *rdev = dev->dev_private;
174
175 if (crtc < 0 || crtc > 1) {
176 DRM_ERROR("Invalid crtc %d\n", crtc);
177 return;
178 }
179
180 rdev->irq.crtc_vblank_int[crtc] = false;
181
182 radeon_irq_set(rdev);
157} 183}
158 184
159 185
@@ -295,5 +321,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
295 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), 321 DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
296 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), 322 DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
297 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), 323 DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
324 DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH),
298}; 325};
299int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); 326int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 7d06dc98a42a..0da72f18fd3a 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -310,10 +310,13 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
310 RADEON_CRTC_DISP_REQ_EN_B)); 310 RADEON_CRTC_DISP_REQ_EN_B));
311 WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); 311 WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask);
312 } 312 }
313 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
314 radeon_crtc_load_lut(crtc);
313 break; 315 break;
314 case DRM_MODE_DPMS_STANDBY: 316 case DRM_MODE_DPMS_STANDBY:
315 case DRM_MODE_DPMS_SUSPEND: 317 case DRM_MODE_DPMS_SUSPEND:
316 case DRM_MODE_DPMS_OFF: 318 case DRM_MODE_DPMS_OFF:
319 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
317 if (radeon_crtc->crtc_id) 320 if (radeon_crtc->crtc_id)
318 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); 321 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
319 else { 322 else {
@@ -323,10 +326,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
323 } 326 }
324 break; 327 break;
325 } 328 }
326
327 if (mode != DRM_MODE_DPMS_OFF) {
328 radeon_crtc_load_lut(crtc);
329 }
330} 329}
331 330
332/* properly set crtc bpp when using atombios */ 331/* properly set crtc bpp when using atombios */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 34d0f58eb944..9322675ef6d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -1066,6 +1066,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1066 1066
1067 switch (radeon_encoder->encoder_id) { 1067 switch (radeon_encoder->encoder_id) {
1068 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 1068 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
1069 encoder->possible_crtcs = 0x1;
1069 drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); 1070 drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS);
1070 drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); 1071 drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs);
1071 if (rdev->is_atom_bios) 1072 if (rdev->is_atom_bios)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index e98cae3bf4a6..b85fb83d7ae8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -316,6 +316,25 @@ int radeon_object_wait(struct radeon_object *robj)
316 return r; 316 return r;
317} 317}
318 318
319int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
320{
321 int r = 0;
322
323 r = radeon_object_reserve(robj, true);
324 if (unlikely(r != 0)) {
325 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
326 return r;
327 }
328 spin_lock(&robj->tobj.lock);
329 *cur_placement = robj->tobj.mem.mem_type;
330 if (robj->tobj.sync_obj) {
331 r = ttm_bo_wait(&robj->tobj, true, true, true);
332 }
333 spin_unlock(&robj->tobj.lock);
334 radeon_object_unreserve(robj);
335 return r;
336}
337
319int radeon_object_evict_vram(struct radeon_device *rdev) 338int radeon_object_evict_vram(struct radeon_device *rdev)
320{ 339{
321 if (rdev->flags & RADEON_IS_IGP) { 340 if (rdev->flags & RADEON_IS_IGP) {
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index e1b618574461..4df43f62c678 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -982,12 +982,15 @@
982# define RS400_TMDS2_PLLRST (1 << 1) 982# define RS400_TMDS2_PLLRST (1 << 1)
983 983
984#define RADEON_GEN_INT_CNTL 0x0040 984#define RADEON_GEN_INT_CNTL 0x0040
985# define RADEON_CRTC_VBLANK_MASK (1 << 0)
986# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
985# define RADEON_SW_INT_ENABLE (1 << 25) 987# define RADEON_SW_INT_ENABLE (1 << 25)
986#define RADEON_GEN_INT_STATUS 0x0044 988#define RADEON_GEN_INT_STATUS 0x0044
987# define RADEON_VSYNC_INT_AK (1 << 2) 989# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
988# define RADEON_VSYNC_INT (1 << 2) 990# define RADEON_CRTC_VBLANK_STAT (1 << 0)
989# define RADEON_VSYNC2_INT_AK (1 << 6) 991# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
990# define RADEON_VSYNC2_INT (1 << 6) 992# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
993# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
991# define RADEON_SW_INT_FIRE (1 << 26) 994# define RADEON_SW_INT_FIRE (1 << 26)
992# define RADEON_SW_INT_TEST (1 << 25) 995# define RADEON_SW_INT_TEST (1 << 25)
993# define RADEON_SW_INT_TEST_ACK (1 << 25) 996# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -2334,6 +2337,9 @@
2334# define RADEON_RE_WIDTH_SHIFT 0 2337# define RADEON_RE_WIDTH_SHIFT 0
2335# define RADEON_RE_HEIGHT_SHIFT 16 2338# define RADEON_RE_HEIGHT_SHIFT 16
2336 2339
2340#define RADEON_RB3D_ZPASS_DATA 0x3290
2341#define RADEON_RB3D_ZPASS_ADDR 0x3294
2342
2337#define RADEON_SE_CNTL 0x1c4c 2343#define RADEON_SE_CNTL 0x1c4c
2338# define RADEON_FFACE_CULL_CW (0 << 0) 2344# define RADEON_FFACE_CULL_CW (0 << 0)
2339# define RADEON_FFACE_CULL_CCW (1 << 0) 2345# define RADEON_FFACE_CULL_CCW (1 << 0)
@@ -3568,4 +3574,6 @@
3568#define RADEON_SCRATCH_REG4 0x15f0 3574#define RADEON_SCRATCH_REG4 0x15f0
3569#define RADEON_SCRATCH_REG5 0x15f4 3575#define RADEON_SCRATCH_REG5 0x15f4
3570 3576
3577#define RV530_GB_PIPE_SELECT2 0x4124
3578
3571#endif 3579#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 46645f3e0328..2882f40d5ec5 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -3081,6 +3081,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
3081 case RADEON_PARAM_NUM_GB_PIPES: 3081 case RADEON_PARAM_NUM_GB_PIPES:
3082 value = dev_priv->num_gb_pipes; 3082 value = dev_priv->num_gb_pipes;
3083 break; 3083 break;
3084 case RADEON_PARAM_NUM_Z_PIPES:
3085 value = dev_priv->num_z_pipes;
3086 break;
3084 default: 3087 default:
3085 DRM_DEBUG("Invalid parameter %d\n", param->param); 3088 DRM_DEBUG("Invalid parameter %d\n", param->param);
3086 return -EINVAL; 3089 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index bbea6dee4a94..7e8ce983a908 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -240,6 +240,88 @@ void rs600_mc_fini(struct radeon_device *rdev)
240 240
241 241
242/* 242/*
243 * Interrupts
244 */
245int rs600_irq_set(struct radeon_device *rdev)
246{
247 uint32_t tmp = 0;
248 uint32_t mode_int = 0;
249
250 if (rdev->irq.sw_int) {
251 tmp |= RADEON_SW_INT_ENABLE;
252 }
253 if (rdev->irq.crtc_vblank_int[0]) {
254 tmp |= AVIVO_DISPLAY_INT_STATUS;
255 mode_int |= AVIVO_D1MODE_INT_MASK;
256 }
257 if (rdev->irq.crtc_vblank_int[1]) {
258 tmp |= AVIVO_DISPLAY_INT_STATUS;
259 mode_int |= AVIVO_D2MODE_INT_MASK;
260 }
261 WREG32(RADEON_GEN_INT_CNTL, tmp);
262 WREG32(AVIVO_DxMODE_INT_MASK, mode_int);
263 return 0;
264}
265
266static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
267{
268 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
269 uint32_t irq_mask = RADEON_SW_INT_TEST;
270
271 if (irqs & AVIVO_DISPLAY_INT_STATUS) {
272 *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS);
273 if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
274 WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
275 }
276 if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
277 WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK);
278 }
279 } else {
280 *r500_disp_int = 0;
281 }
282
283 if (irqs) {
284 WREG32(RADEON_GEN_INT_STATUS, irqs);
285 }
286 return irqs & irq_mask;
287}
288
289int rs600_irq_process(struct radeon_device *rdev)
290{
291 uint32_t status;
292 uint32_t r500_disp_int;
293
294 status = rs600_irq_ack(rdev, &r500_disp_int);
295 if (!status && !r500_disp_int) {
296 return IRQ_NONE;
297 }
298 while (status || r500_disp_int) {
299 /* SW interrupt */
300 if (status & RADEON_SW_INT_TEST) {
301 radeon_fence_process(rdev);
302 }
303 /* Vertical blank interrupts */
304 if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) {
305 drm_handle_vblank(rdev->ddev, 0);
306 }
307 if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) {
308 drm_handle_vblank(rdev->ddev, 1);
309 }
310 status = rs600_irq_ack(rdev, &r500_disp_int);
311 }
312 return IRQ_HANDLED;
313}
314
315u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
316{
317 if (crtc == 0)
318 return RREG32(AVIVO_D1CRTC_FRAME_COUNT);
319 else
320 return RREG32(AVIVO_D2CRTC_FRAME_COUNT);
321}
322
323
324/*
243 * Global GPU functions 325 * Global GPU functions
244 */ 326 */
245void rs600_disable_vga(struct radeon_device *rdev) 327void rs600_disable_vga(struct radeon_device *rdev)
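rs600_irq_set() above is essentially mask assembly: one word for the general interrupt control and one for the per-CRTC mode interrupt mask, derived from the sw_int and crtc_vblank_int flags. A small sketch of that assembly with placeholder bit values (the real ones are in radeon_reg.h and r500_reg.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values for illustration. */
#define SW_INT_ENABLE      (1u << 25)
#define DISPLAY_INT_STATUS (1u << 0)
#define D1MODE_INT_MASK    (1u << 0)
#define D2MODE_INT_MASK    (1u << 8)

struct irq_flags {
	bool sw_int;
	bool crtc_vblank_int[2];
};

/* Mirrors the shape of rs600_irq_set(): sw_int drives the fence interrupt
 * enable, each enabled CRTC contributes its vblank mask bit. */
static void build_irq_masks(const struct irq_flags *irq,
			    uint32_t *gen_cntl, uint32_t *mode_mask)
{
	uint32_t tmp = 0, mode = 0;

	if (irq->sw_int)
		tmp |= SW_INT_ENABLE;
	if (irq->crtc_vblank_int[0]) {
		tmp |= DISPLAY_INT_STATUS;
		mode |= D1MODE_INT_MASK;
	}
	if (irq->crtc_vblank_int[1]) {
		tmp |= DISPLAY_INT_STATUS;
		mode |= D2MODE_INT_MASK;
	}
	*gen_cntl = tmp;
	*mode_mask = mode;
}

int main(void)
{
	struct irq_flags irq = { .sw_int = true, .crtc_vblank_int = { true, false } };
	uint32_t cntl, mask;

	build_irq_masks(&irq, &cntl, &mask);
	printf("GEN_INT_CNTL=0x%08x DxMODE_INT_MASK=0x%08x\n",
	       (unsigned)cntl, (unsigned)mask);
	return 0;
}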
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 839595b00728..bc6b7c5339bc 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -652,3 +652,68 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
652 WREG32(RS690_MC_DATA, v); 652 WREG32(RS690_MC_DATA, v);
653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); 653 WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
654} 654}
655
656static const unsigned rs690_reg_safe_bm[219] = {
657 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
658 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
659 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
660 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
661 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
662 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
663 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
664 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
665 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
666 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
667 0x17FF1FFF,0xFFFFFFFC,0xFFFFFFFF,0xFF30FFBF,
668 0xFFFFFFF8,0xC3E6FFFF,0xFFFFF6DF,0xFFFFFFFF,
669 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
670 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
671 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFF03F,
672 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
673 0xFFFFFFFF,0xFFFFEFCE,0xF00EBFFF,0x007C0000,
674 0xF0000078,0xFF000009,0xFFFFFFFF,0xFFFFFFFF,
675 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
676 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
677 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
678 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
679 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
680 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
681 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
682 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
683 0xFFFFF7FF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
684 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
685 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
686 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
687 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
688 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
689 0xFFFFFC78,0xFFFFFFFF,0xFFFFFFFE,0xFFFFFFFF,
690 0x38FF8F50,0xFFF88082,0xF000000C,0xFAE009FF,
691 0x0000FFFF,0xFFFFFFFF,0xFFFFFFFF,0x00000000,
692 0x00000000,0x0000C100,0x00000000,0x00000000,
693 0x00000000,0x00000000,0x00000000,0x00000000,
694 0x00000000,0xFFFF0000,0xFFFFFFFF,0xFF80FFFF,
695 0x00000000,0x00000000,0x00000000,0x00000000,
696 0x0003FC01,0xFFFFFFF8,0xFE800B19,0xFFFFFFFF,
697 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
698 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
699 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
700 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
701 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
702 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
703 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
704 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
705 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
706 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
707 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
708 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
709 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
710 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
711 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
712};
713
714int rs690_init(struct radeon_device *rdev)
715{
716 rdev->config.r300.reg_safe_bm = rs690_reg_safe_bm;
717 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs690_reg_safe_bm);
718 return 0;
719}
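rs690_init() only registers the bitmap; the command-stream checker that consumes it is not part of this hunk. Assuming the usual layout of one bit per 32-bit register, with a set bit marking a register that user command streams may touch, a lookup reduces to indexing by the register's dword offset, as sketched below:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: bit N of word N/32 covers the register at byte offset
 * N * 4, so 219 words span offsets 0x0000 through 0x6D7C. */
static bool reg_is_safe(const unsigned *safe_bm, unsigned bm_words, uint32_t reg)
{
	uint32_t dword = reg >> 2;	/* register index, not byte offset */
	uint32_t word = dword >> 5;	/* which 32-bit word of the bitmap */
	uint32_t bit = dword & 31;	/* which bit inside that word */

	if (word >= bm_words)
		return false;		/* outside the table: reject */
	return (safe_bm[word] >> bit) & 1;
}

int main(void)
{
	/* Tiny stand-in table: first 32 registers allowed, next 32 blocked. */
	static const unsigned bm[2] = { 0xFFFFFFFF, 0x00000000 };

	printf("reg 0x0000 safe: %d\n", reg_is_safe(bm, 2, 0x0000));
	printf("reg 0x0080 safe: %d\n", reg_is_safe(bm, 2, 0x0080));
	return 0;
}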
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index fd8f3ca716ea..31a7f668ae5a 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -400,25 +400,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
400 WREG32(MC_IND_INDEX, 0); 400 WREG32(MC_IND_INDEX, 0);
401} 401}
402 402
403uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
404{
405 uint32_t r;
406
407 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
408 (void)RREG32(PCIE_INDEX);
409 r = RREG32(PCIE_DATA);
410 return r;
411}
412
413void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
414{
415 WREG32(PCIE_INDEX, ((reg) & 0x7ff));
416 (void)RREG32(PCIE_INDEX);
417 WREG32(PCIE_DATA, (v));
418 (void)RREG32(PCIE_DATA);
419}
420
421
422/* 403/*
423 * Debugfs info 404 * Debugfs info
424 */ 405 */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d258b02aef44..827da0858136 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -674,7 +674,14 @@ omap_i2c_isr(int this_irq, void *dev_id)
674 674
675 err = 0; 675 err = 0;
676complete: 676complete:
677 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat); 677 /*
678 * Ack the stat in one go, but [R/X]DR and [R/X]RDY should be
679 * acked after the data operation is complete.
680 * Ref: TRM SWPU114Q Figure 18-31
681 */
682 omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, stat &
683 ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
684 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
678 685
679 if (stat & OMAP_I2C_STAT_NACK) { 686 if (stat & OMAP_I2C_STAT_NACK) {
680 err |= OMAP_I2C_STAT_NACK; 687 err |= OMAP_I2C_STAT_NACK;
@@ -687,6 +694,9 @@ complete:
687 } 694 }
688 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | 695 if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
689 OMAP_I2C_STAT_AL)) { 696 OMAP_I2C_STAT_AL)) {
697 omap_i2c_ack_stat(dev, stat &
698 (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
699 OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
690 omap_i2c_complete_cmd(dev, err); 700 omap_i2c_complete_cmd(dev, err);
691 return IRQ_HANDLED; 701 return IRQ_HANDLED;
692 } 702 }
@@ -774,7 +784,7 @@ complete:
774 * memory to the I2C interface. 784 * memory to the I2C interface.
775 */ 785 */
776 786
777 if (cpu_is_omap34xx()) { 787 if (dev->rev <= OMAP_I2C_REV_ON_3430) {
778 while (!(stat & OMAP_I2C_STAT_XUDF)) { 788 while (!(stat & OMAP_I2C_STAT_XUDF)) {
779 if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { 789 if (stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
780 omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); 790 omap_i2c_ack_stat(dev, stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
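The change above splits the OMAP status acknowledge in two: every bit except the receive/transmit data-ready ones is acked immediately, and the data bits are acked only once the corresponding FIFO work has been done. The masking itself is plain bit arithmetic, sketched here with placeholder bit positions (the real OMAP_I2C_STAT_* values live in the driver):

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions for illustration only. */
#define STAT_NACK (1u << 1)
#define STAT_ARDY (1u << 2)
#define STAT_RRDY (1u << 3)
#define STAT_XRDY (1u << 4)
#define STAT_RDR  (1u << 13)
#define STAT_XDR  (1u << 14)

#define STAT_DATA_BITS (STAT_RRDY | STAT_RDR | STAT_XRDY | STAT_XDR)

int main(void)
{
	uint32_t stat = STAT_ARDY | STAT_NACK | STAT_XRDY;

	/* First write: ack everything that is not a data-ready bit. */
	uint32_t ack_now = stat & ~STAT_DATA_BITS;
	/* Later, after the FIFO has actually been drained or filled. */
	uint32_t ack_after_data = stat & STAT_DATA_BITS;

	printf("stat=0x%04x ack_now=0x%04x ack_after_data=0x%04x\n",
	       (unsigned)stat, (unsigned)ack_now, (unsigned)ack_after_data);
	return 0;
}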
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 182e711318ba..d2728a28a8db 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -117,7 +117,8 @@ enum stu300_error {
117 STU300_ERROR_NONE = 0, 117 STU300_ERROR_NONE = 0,
118 STU300_ERROR_ACKNOWLEDGE_FAILURE, 118 STU300_ERROR_ACKNOWLEDGE_FAILURE,
119 STU300_ERROR_BUS_ERROR, 119 STU300_ERROR_BUS_ERROR,
120 STU300_ERROR_ARBITRATION_LOST 120 STU300_ERROR_ARBITRATION_LOST,
121 STU300_ERROR_UNKNOWN
121}; 122};
122 123
123/* timeout waiting for the controller to respond */ 124/* timeout waiting for the controller to respond */
@@ -127,7 +128,7 @@ enum stu300_error {
127 * The number of address send attempts tried before giving up. 128 * The number of address send attempts tried before giving up.
128 * If the first one fails it seems like 5 to 8 attempts are required. 129 * If the first one fails it seems like 5 to 8 attempts are required.
129 */ 130 */
130#define NUM_ADDR_RESEND_ATTEMPTS 10 131#define NUM_ADDR_RESEND_ATTEMPTS 12
131 132
132/* I2C clock speed, in Hz 0-400kHz*/ 133/* I2C clock speed, in Hz 0-400kHz*/
133static unsigned int scl_frequency = 100000; 134static unsigned int scl_frequency = 100000;
@@ -149,6 +150,7 @@ module_param(scl_frequency, uint, 0644);
149 * @msg_index: index of current message 150 * @msg_index: index of current message
150 * @msg_len: length of current message 151 * @msg_len: length of current message
151 */ 152 */
153
152struct stu300_dev { 154struct stu300_dev {
153 struct platform_device *pdev; 155 struct platform_device *pdev;
154 struct i2c_adapter adapter; 156 struct i2c_adapter adapter;
@@ -188,6 +190,27 @@ static inline u32 stu300_r8(void __iomem *address)
188 return readl(address) & 0x000000FFU; 190 return readl(address) & 0x000000FFU;
189} 191}
190 192
193static void stu300_irq_enable(struct stu300_dev *dev)
194{
195 u32 val;
196 val = stu300_r8(dev->virtbase + I2C_CR);
197 val |= I2C_CR_INTERRUPT_ENABLE;
198 /* Twice paranoia (possible HW glitch) */
199 stu300_wr8(val, dev->virtbase + I2C_CR);
200 stu300_wr8(val, dev->virtbase + I2C_CR);
201}
202
203static void stu300_irq_disable(struct stu300_dev *dev)
204{
205 u32 val;
206 val = stu300_r8(dev->virtbase + I2C_CR);
207 val &= ~I2C_CR_INTERRUPT_ENABLE;
208 /* Twice paranoia (possible HW glitch) */
209 stu300_wr8(val, dev->virtbase + I2C_CR);
210 stu300_wr8(val, dev->virtbase + I2C_CR);
211}
212
213
191/* 214/*
192 * Tells whether a certain event or events occurred in 215 * Tells whether a certain event or events occurred in
193 * response to a command. The events represent states in 216 * response to a command. The events represent states in
@@ -196,9 +219,10 @@ static inline u32 stu300_r8(void __iomem *address)
196 * documentation and can only be treated as abstract state 219 * documentation and can only be treated as abstract state
197 * machine states. 220 * machine states.
198 * 221 *
199 * @ret 0 = event has not occurred, any other value means 222 * @ret 0 = event has not occurred or unknown error, any
200 * the event occurred. 223 * other value means the correct event occurred or an error.
201 */ 224 */
225
202static int stu300_event_occurred(struct stu300_dev *dev, 226static int stu300_event_occurred(struct stu300_dev *dev,
203 enum stu300_event mr_event) { 227 enum stu300_event mr_event) {
204 u32 status1; 228 u32 status1;
@@ -206,11 +230,28 @@ static int stu300_event_occurred(struct stu300_dev *dev,
206 230
207 /* What event happened? */ 231 /* What event happened? */
208 status1 = stu300_r8(dev->virtbase + I2C_SR1); 232 status1 = stu300_r8(dev->virtbase + I2C_SR1);
233
209 if (!(status1 & I2C_SR1_EVF_IND)) 234 if (!(status1 & I2C_SR1_EVF_IND))
210 /* No event at all */ 235 /* No event at all */
211 return 0; 236 return 0;
237
212 status2 = stu300_r8(dev->virtbase + I2C_SR2); 238 status2 = stu300_r8(dev->virtbase + I2C_SR2);
213 239
240 /* Block any multiple interrupts */
241 stu300_irq_disable(dev);
242
243 /* Check for errors first */
244 if (status2 & I2C_SR2_AF_IND) {
245 dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
246 return 1;
247 } else if (status2 & I2C_SR2_BERR_IND) {
248 dev->cmd_err = STU300_ERROR_BUS_ERROR;
249 return 1;
250 } else if (status2 & I2C_SR2_ARLO_IND) {
251 dev->cmd_err = STU300_ERROR_ARBITRATION_LOST;
252 return 1;
253 }
254
214 switch (mr_event) { 255 switch (mr_event) {
215 case STU300_EVENT_1: 256 case STU300_EVENT_1:
216 if (status1 & I2C_SR1_ADSL_IND) 257 if (status1 & I2C_SR1_ADSL_IND)
@@ -221,10 +262,6 @@ static int stu300_event_occurred(struct stu300_dev *dev,
221 case STU300_EVENT_7: 262 case STU300_EVENT_7:
222 case STU300_EVENT_8: 263 case STU300_EVENT_8:
223 if (status1 & I2C_SR1_BTF_IND) { 264 if (status1 & I2C_SR1_BTF_IND) {
224 if (status2 & I2C_SR2_AF_IND)
225 dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
226 else if (status2 & I2C_SR2_BERR_IND)
227 dev->cmd_err = STU300_ERROR_BUS_ERROR;
228 return 1; 265 return 1;
229 } 266 }
230 break; 267 break;
@@ -240,8 +277,6 @@ static int stu300_event_occurred(struct stu300_dev *dev,
240 case STU300_EVENT_6: 277 case STU300_EVENT_6:
241 if (status2 & I2C_SR2_ENDAD_IND) { 278 if (status2 & I2C_SR2_ENDAD_IND) {
242 /* First check for any errors */ 279 /* First check for any errors */
243 if (status2 & I2C_SR2_AF_IND)
244 dev->cmd_err = STU300_ERROR_ACKNOWLEDGE_FAILURE;
245 return 1; 280 return 1;
246 } 281 }
247 break; 282 break;
@@ -252,8 +287,15 @@ static int stu300_event_occurred(struct stu300_dev *dev,
252 default: 287 default:
253 break; 288 break;
254 } 289 }
255 if (status2 & I2C_SR2_ARLO_IND) 290 /* If we get here, we're on thin ice.
256 dev->cmd_err = STU300_ERROR_ARBITRATION_LOST; 291 * Here we are in a status where we have
292 * gotten a response that does not match
293 * what we requested.
294 */
295 dev->cmd_err = STU300_ERROR_UNKNOWN;
296 dev_err(&dev->pdev->dev,
297 "Unhandled interrupt! %d sr1: 0x%x sr2: 0x%x\n",
298 mr_event, status1, status2);
257 return 0; 299 return 0;
258} 300}
259 301
@@ -262,21 +304,20 @@ static irqreturn_t stu300_irh(int irq, void *data)
262 struct stu300_dev *dev = data; 304 struct stu300_dev *dev = data;
263 int res; 305 int res;
264 306
307 /* Just make sure that the block is clocked */
308 clk_enable(dev->clk);
309
265 /* See if this was what we were waiting for */ 310 /* See if this was what we were waiting for */
266 spin_lock(&dev->cmd_issue_lock); 311 spin_lock(&dev->cmd_issue_lock);
267 if (dev->cmd_event != STU300_EVENT_NONE) { 312
268 res = stu300_event_occurred(dev, dev->cmd_event); 313 res = stu300_event_occurred(dev, dev->cmd_event);
269 if (res || dev->cmd_err != STU300_ERROR_NONE) { 314 if (res || dev->cmd_err != STU300_ERROR_NONE)
270 u32 val; 315 complete(&dev->cmd_complete);
271 316
272 complete(&dev->cmd_complete);
273 /* Block any multiple interrupts */
274 val = stu300_r8(dev->virtbase + I2C_CR);
275 val &= ~I2C_CR_INTERRUPT_ENABLE;
276 stu300_wr8(val, dev->virtbase + I2C_CR);
277 }
278 }
279 spin_unlock(&dev->cmd_issue_lock); 317 spin_unlock(&dev->cmd_issue_lock);
318
319 clk_disable(dev->clk);
320
280 return IRQ_HANDLED; 321 return IRQ_HANDLED;
281} 322}
282 323
@@ -308,7 +349,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev,
308 stu300_wr8(cr_value, dev->virtbase + I2C_CR); 349 stu300_wr8(cr_value, dev->virtbase + I2C_CR);
309 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 350 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
310 STU300_TIMEOUT); 351 STU300_TIMEOUT);
311
312 if (ret < 0) { 352 if (ret < 0) {
313 dev_err(&dev->pdev->dev, 353 dev_err(&dev->pdev->dev,
314 "wait_for_completion_interruptible_timeout() " 354 "wait_for_completion_interruptible_timeout() "
@@ -342,7 +382,6 @@ static int stu300_await_event(struct stu300_dev *dev,
342 enum stu300_event mr_event) 382 enum stu300_event mr_event)
343{ 383{
344 int ret; 384 int ret;
345 u32 val;
346 385
347 if (unlikely(irqs_disabled())) { 386 if (unlikely(irqs_disabled())) {
348 /* TODO: implement polling for this case if need be. */ 387 /* TODO: implement polling for this case if need be. */
@@ -354,36 +393,18 @@ static int stu300_await_event(struct stu300_dev *dev,
354 /* Is it already here? */ 393 /* Is it already here? */
355 spin_lock_irq(&dev->cmd_issue_lock); 394 spin_lock_irq(&dev->cmd_issue_lock);
356 dev->cmd_err = STU300_ERROR_NONE; 395 dev->cmd_err = STU300_ERROR_NONE;
357 if (stu300_event_occurred(dev, mr_event)) {
358 spin_unlock_irq(&dev->cmd_issue_lock);
359 goto exit_await_check_err;
360 }
361 init_completion(&dev->cmd_complete);
362 dev->cmd_err = STU300_ERROR_NONE;
363 dev->cmd_event = mr_event; 396 dev->cmd_event = mr_event;
364 397
365 /* Turn on the I2C interrupt for current operation */ 398 init_completion(&dev->cmd_complete);
366 val = stu300_r8(dev->virtbase + I2C_CR);
367 val |= I2C_CR_INTERRUPT_ENABLE;
368 stu300_wr8(val, dev->virtbase + I2C_CR);
369
370 /* Twice paranoia (possible HW glitch) */
371 stu300_wr8(val, dev->virtbase + I2C_CR);
372 399
373 /* Check again: is it already here? */ 400 /* Turn on the I2C interrupt for current operation */
374 if (unlikely(stu300_event_occurred(dev, mr_event))) { 401 stu300_irq_enable(dev);
375 /* Disable IRQ again. */
376 val &= ~I2C_CR_INTERRUPT_ENABLE;
377 stu300_wr8(val, dev->virtbase + I2C_CR);
378 spin_unlock_irq(&dev->cmd_issue_lock);
379 goto exit_await_check_err;
380 }
381 402
382 /* Unlock the command block and wait for the event to occur */ 403 /* Unlock the command block and wait for the event to occur */
383 spin_unlock_irq(&dev->cmd_issue_lock); 404 spin_unlock_irq(&dev->cmd_issue_lock);
405
384 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, 406 ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
385 STU300_TIMEOUT); 407 STU300_TIMEOUT);
386
387 if (ret < 0) { 408 if (ret < 0) {
388 dev_err(&dev->pdev->dev, 409 dev_err(&dev->pdev->dev,
389 "wait_for_completion_interruptible_timeout()" 410 "wait_for_completion_interruptible_timeout()"
@@ -401,7 +422,6 @@ static int stu300_await_event(struct stu300_dev *dev,
401 return -ETIMEDOUT; 422 return -ETIMEDOUT;
402 } 423 }
403 424
404 exit_await_check_err:
405 if (dev->cmd_err != STU300_ERROR_NONE) { 425 if (dev->cmd_err != STU300_ERROR_NONE) {
406 if (mr_event != STU300_EVENT_6) { 426 if (mr_event != STU300_EVENT_6) {
407 dev_err(&dev->pdev->dev, "controller " 427 dev_err(&dev->pdev->dev, "controller "
@@ -457,18 +477,19 @@ struct stu300_clkset {
457}; 477};
458 478
459static const struct stu300_clkset stu300_clktable[] = { 479static const struct stu300_clkset stu300_clktable[] = {
460 { 0, 0xFFU }, 480 { 0, 0xFFU },
461 { 2500000, I2C_OAR2_FR_25_10MHZ }, 481 { 2500000, I2C_OAR2_FR_25_10MHZ },
462 { 10000000, I2C_OAR2_FR_10_1667MHZ }, 482 { 10000000, I2C_OAR2_FR_10_1667MHZ },
463 { 16670000, I2C_OAR2_FR_1667_2667MHZ }, 483 { 16670000, I2C_OAR2_FR_1667_2667MHZ },
464 { 26670000, I2C_OAR2_FR_2667_40MHZ }, 484 { 26670000, I2C_OAR2_FR_2667_40MHZ },
465 { 40000000, I2C_OAR2_FR_40_5333MHZ }, 485 { 40000000, I2C_OAR2_FR_40_5333MHZ },
466 { 53330000, I2C_OAR2_FR_5333_66MHZ }, 486 { 53330000, I2C_OAR2_FR_5333_66MHZ },
467 { 66000000, I2C_OAR2_FR_66_80MHZ }, 487 { 66000000, I2C_OAR2_FR_66_80MHZ },
468 { 80000000, I2C_OAR2_FR_80_100MHZ }, 488 { 80000000, I2C_OAR2_FR_80_100MHZ },
469 { 100000000, 0xFFU }, 489 { 100000000, 0xFFU },
470}; 490};
471 491
492
472static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate) 493static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
473{ 494{
474 495
@@ -494,10 +515,10 @@ static int stu300_set_clk(struct stu300_dev *dev, unsigned long clkrate)
494 515
495 if (dev->speed > 100000) 516 if (dev->speed > 100000)
496 /* Fast Mode I2C */ 517 /* Fast Mode I2C */
497 val = ((clkrate/dev->speed)-9)/3; 518 val = ((clkrate/dev->speed) - 9)/3 + 1;
498 else 519 else
499 /* Standard Mode I2C */ 520 /* Standard Mode I2C */
500 val = ((clkrate/dev->speed)-7)/2; 521 val = ((clkrate/dev->speed) - 7)/2 + 1;
501 522
502 /* According to spec the divider must be > 2 */ 523 /* According to spec the divider must be > 2 */
503 if (val < 0x002) { 524 if (val < 0x002) {
@@ -557,6 +578,7 @@ static int stu300_init_hw(struct stu300_dev *dev)
557 */ 578 */
558 clkrate = clk_get_rate(dev->clk); 579 clkrate = clk_get_rate(dev->clk);
559 ret = stu300_set_clk(dev, clkrate); 580 ret = stu300_set_clk(dev, clkrate);
581
560 if (ret) 582 if (ret)
561 return ret; 583 return ret;
562 /* 584 /*
@@ -641,7 +663,6 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
641 int attempts = 0; 663 int attempts = 0;
642 struct stu300_dev *dev = i2c_get_adapdata(adap); 664 struct stu300_dev *dev = i2c_get_adapdata(adap);
643 665
644
645 clk_enable(dev->clk); 666 clk_enable(dev->clk);
646 667
647 /* Remove this if (0) to trace each and every message. */ 668 /* Remove this if (0) to trace each and every message. */
@@ -715,14 +736,15 @@ static int stu300_xfer_msg(struct i2c_adapter *adap,
715 736
716 if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) { 737 if (attempts < NUM_ADDR_RESEND_ATTEMPTS && attempts > 0) {
717 dev_dbg(&dev->pdev->dev, "managed to get address " 738 dev_dbg(&dev->pdev->dev, "managed to get address "
718 "through after %d attempts\n", attempts); 739 "through after %d attempts\n", attempts);
719 } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) { 740 } else if (attempts == NUM_ADDR_RESEND_ATTEMPTS) {
720 dev_dbg(&dev->pdev->dev, "I give up, tried %d times " 741 dev_dbg(&dev->pdev->dev, "I give up, tried %d times "
721 "to resend address.\n", 742 "to resend address.\n",
722 NUM_ADDR_RESEND_ATTEMPTS); 743 NUM_ADDR_RESEND_ATTEMPTS);
723 goto exit_disable; 744 goto exit_disable;
724 } 745 }
725 746
747
726 if (msg->flags & I2C_M_RD) { 748 if (msg->flags & I2C_M_RD) {
727 /* READ: we read the actual bytes one at a time */ 749 /* READ: we read the actual bytes one at a time */
728 for (i = 0; i < msg->len; i++) { 750 for (i = 0; i < msg->len; i++) {
@@ -804,8 +826,10 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
804{ 826{
805 int ret = -1; 827 int ret = -1;
806 int i; 828 int i;
829
807 struct stu300_dev *dev = i2c_get_adapdata(adap); 830 struct stu300_dev *dev = i2c_get_adapdata(adap);
808 dev->msg_len = num; 831 dev->msg_len = num;
832
809 for (i = 0; i < num; i++) { 833 for (i = 0; i < num; i++) {
810 /* 834 /*
811 * Another driver appears to send stop for each message, 835 * Another driver appears to send stop for each message,
@@ -817,6 +841,7 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
817 dev->msg_index = i; 841 dev->msg_index = i;
818 842
819 ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1))); 843 ret = stu300_xfer_msg(adap, &msgs[i], (i == (num - 1)));
844
820 if (ret != 0) { 845 if (ret != 0) {
821 num = ret; 846 num = ret;
822 break; 847 break;
@@ -845,6 +870,7 @@ stu300_probe(struct platform_device *pdev)
845 struct resource *res; 870 struct resource *res;
846 int bus_nr; 871 int bus_nr;
847 int ret = 0; 872 int ret = 0;
873 char clk_name[] = "I2C0";
848 874
849 dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); 875 dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL);
850 if (!dev) { 876 if (!dev) {
@@ -854,7 +880,8 @@ stu300_probe(struct platform_device *pdev)
854 } 880 }
855 881
856 bus_nr = pdev->id; 882 bus_nr = pdev->id;
857 dev->clk = clk_get(&pdev->dev, NULL); 883 clk_name[3] += (char)bus_nr;
884 dev->clk = clk_get(&pdev->dev, clk_name);
858 if (IS_ERR(dev->clk)) { 885 if (IS_ERR(dev->clk)) {
859 ret = PTR_ERR(dev->clk); 886 ret = PTR_ERR(dev->clk);
860 dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); 887 dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
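Among the stu300 changes, the clock divider in stu300_set_clk() gains a "+ 1" so the generated SCL rate should end up at or below the requested bus speed rather than slightly above it. A standalone sketch of that arithmetic for an example 48 MHz block clock at the standard 100 kHz and fast 400 kHz rates:

#include <stdio.h>

/* Same arithmetic as the patched stu300_set_clk(); the + 1 rounds the
 * divider up by one step. */
static unsigned long stu300_divider(unsigned long clkrate, unsigned long speed)
{
	if (speed > 100000)			/* Fast Mode I2C */
		return ((clkrate / speed) - 9) / 3 + 1;
	else					/* Standard Mode I2C */
		return ((clkrate / speed) - 7) / 2 + 1;
}

int main(void)
{
	unsigned long clkrate = 48000000;	/* example block clock rate */

	printf("100 kHz -> divider %lu\n", stu300_divider(clkrate, 100000));
	printf("400 kHz -> divider %lu\n", stu300_divider(clkrate, 400000));
	return 0;
}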
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 4cfd084fa897..9a1d55b74d7a 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -456,8 +456,11 @@ static int joydev_ioctl_common(struct joydev *joydev,
456 unsigned int cmd, void __user *argp) 456 unsigned int cmd, void __user *argp)
457{ 457{
458 struct input_dev *dev = joydev->handle.dev; 458 struct input_dev *dev = joydev->handle.dev;
459 size_t len;
459 int i, j; 460 int i, j;
461 const char *name;
460 462
463 /* Process fixed-sized commands. */
461 switch (cmd) { 464 switch (cmd) {
462 465
463 case JS_SET_CAL: 466 case JS_SET_CAL:
@@ -499,9 +502,22 @@ static int joydev_ioctl_common(struct joydev *joydev,
499 return copy_to_user(argp, joydev->corr, 502 return copy_to_user(argp, joydev->corr,
500 sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0; 503 sizeof(joydev->corr[0]) * joydev->nabs) ? -EFAULT : 0;
501 504
502 case JSIOCSAXMAP: 505 }
503 if (copy_from_user(joydev->abspam, argp, 506
504 sizeof(__u8) * (ABS_MAX + 1))) 507 /*
508 * Process variable-sized commands (the axis and button map commands
509 * are considered variable-sized to decouple them from the values of
510 * ABS_MAX and KEY_MAX).
511 */
512 switch (cmd & ~IOCSIZE_MASK) {
513
514 case (JSIOCSAXMAP & ~IOCSIZE_MASK):
515 len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam));
516 /*
517 * FIXME: we should not copy into our axis map before
518 * validating the data.
519 */
520 if (copy_from_user(joydev->abspam, argp, len))
505 return -EFAULT; 521 return -EFAULT;
506 522
507 for (i = 0; i < joydev->nabs; i++) { 523 for (i = 0; i < joydev->nabs; i++) {
@@ -511,13 +527,17 @@ static int joydev_ioctl_common(struct joydev *joydev,
511 } 527 }
512 return 0; 528 return 0;
513 529
514 case JSIOCGAXMAP: 530 case (JSIOCGAXMAP & ~IOCSIZE_MASK):
515 return copy_to_user(argp, joydev->abspam, 531 len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->abspam));
516 sizeof(__u8) * (ABS_MAX + 1)) ? -EFAULT : 0; 532 return copy_to_user(argp, joydev->abspam, len) ? -EFAULT : 0;
517 533
518 case JSIOCSBTNMAP: 534 case (JSIOCSBTNMAP & ~IOCSIZE_MASK):
519 if (copy_from_user(joydev->keypam, argp, 535 len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam));
520 sizeof(__u16) * (KEY_MAX - BTN_MISC + 1))) 536 /*
537 * FIXME: we should not copy into our keymap before
538 * validating the data.
539 */
540 if (copy_from_user(joydev->keypam, argp, len))
521 return -EFAULT; 541 return -EFAULT;
522 542
523 for (i = 0; i < joydev->nkey; i++) { 543 for (i = 0; i < joydev->nkey; i++) {
@@ -529,25 +549,19 @@ static int joydev_ioctl_common(struct joydev *joydev,
529 549
530 return 0; 550 return 0;
531 551
532 case JSIOCGBTNMAP: 552 case (JSIOCGBTNMAP & ~IOCSIZE_MASK):
533 return copy_to_user(argp, joydev->keypam, 553 len = min_t(size_t, _IOC_SIZE(cmd), sizeof(joydev->keypam));
534 sizeof(__u16) * (KEY_MAX - BTN_MISC + 1)) ? -EFAULT : 0; 554 return copy_to_user(argp, joydev->keypam, len) ? -EFAULT : 0;
535 555
536 default: 556 case JSIOCGNAME(0):
537 if ((cmd & ~IOCSIZE_MASK) == JSIOCGNAME(0)) { 557 name = dev->name;
538 int len; 558 if (!name)
539 const char *name = dev->name; 559 return 0;
540 560
541 if (!name) 561 len = min_t(size_t, _IOC_SIZE(cmd), strlen(name) + 1);
542 return 0; 562 return copy_to_user(argp, name, len) ? -EFAULT : len;
543 len = strlen(name) + 1;
544 if (len > _IOC_SIZE(cmd))
545 len = _IOC_SIZE(cmd);
546 if (copy_to_user(argp, name, len))
547 return -EFAULT;
548 return len;
549 }
550 } 563 }
564
551 return -EINVAL; 565 return -EINVAL;
552} 566}
553 567
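The joydev rework keys the axis-map, button-map and name ioctls on the command word with its size field masked off, then clamps the copy to the smaller of the caller-declared size and the kernel buffer, so binaries built against a different ABS_MAX or KEY_MAX keep working. A userspace-style sketch of that clamping with a simplified ioctl encoding (the real _IOC_* macros are in <linux/ioctl.h>):

#include <stddef.h>
#include <stdio.h>

/* Simplified encoding for illustration: low bits carry the command number,
 * bits 16..29 carry the user buffer size.  The real macros also encode
 * direction and type. */
#define IOC_SIZESHIFT 16
#define IOC_SIZEMASK  (0x3FFFu << IOC_SIZESHIFT)
#define IOC_SIZE(cmd) (((cmd) >> IOC_SIZESHIFT) & 0x3FFF)
#define IOC(nr, size) ((unsigned)(nr) | ((unsigned)(size) << IOC_SIZESHIFT))

static char abspam[64];	/* stand-in for the driver's axis map */

/* Copy at most the caller-declared size, and never more than our buffer:
 * the same clamp as min_t(size_t, _IOC_SIZE(cmd), sizeof(abspam)). */
static size_t map_copy_len(unsigned cmd)
{
	size_t len = IOC_SIZE(cmd);

	if (len > sizeof(abspam))
		len = sizeof(abspam);
	return len;
}

int main(void)
{
	unsigned old_cmd = IOC(0x31, 64);	/* caller built against a 64-entry map */
	unsigned new_cmd = IOC(0x31, 80);	/* caller built against a larger map */

	/* Both commands match once the size field is masked off. */
	printf("same command? %d\n",
	       (old_cmd & ~IOC_SIZEMASK) == (new_cmd & ~IOC_SIZEMASK));
	printf("copy %zu and %zu bytes\n",
	       map_copy_len(old_cmd), map_copy_len(new_cmd));
	return 0;
}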
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index baabf8302645..f6c688cae334 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -74,6 +74,7 @@ static struct iforce_device iforce_device[] = {
74 { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce }, 74 { 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_avb_wheel, abs_wheel, ff_iforce },
75 { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //? 75 { 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_avb_tw, abs_wheel, ff_iforce }, //?
76 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //? 76 { 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
77 { 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
77 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //? 78 { 0x06f8, 0x0001, "Guillemot Race Leader Force Feedback", btn_wheel, abs_wheel, ff_iforce }, //?
78 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //? 79 { 0x06f8, 0x0004, "Guillemot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
79 { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //? 80 { 0x06f8, 0x0004, "Gullemot Jet Leader 3D", btn_joystick, abs_joystick, ff_iforce }, //?
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index f83185aeb511..9f289d8f52c6 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -223,6 +223,7 @@ static struct usb_device_id iforce_usb_ids [] = {
223 { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */ 223 { USB_DEVICE(0x05ef, 0x8884) }, /* AVB Mag Turbo Force */
224 { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */ 224 { USB_DEVICE(0x05ef, 0x8888) }, /* AVB Top Shot FFB Racing Wheel */
225 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */ 225 { USB_DEVICE(0x061c, 0xc0a4) }, /* ACT LABS Force RS */
226 { USB_DEVICE(0x061c, 0xc084) }, /* ACT LABS Force RS */
226 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */ 227 { USB_DEVICE(0x06f8, 0x0001) }, /* Guillemot Race Leader Force Feedback */
227 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */ 228 { USB_DEVICE(0x06f8, 0x0004) }, /* Guillemot Force Feedback Racing Wheel */
228 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */ 229 { USB_DEVICE(0x06f8, 0xa302) }, /* Guillemot Jet Leader 3D */
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 95fe0452dae4..6c6a09b1c0fe 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
880}; 880};
881 881
882/* 882/*
883 * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
884 * release for their volume buttons
885 */
886static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
887 0xae, 0xb0, -1U
888};
889
890/*
883 * Samsung NC10,NC20 with Fn+F? key release not working 891 * Samsung NC10,NC20 with Fn+F? key release not working
884 */ 892 */
885static unsigned int atkbd_samsung_forced_release_keys[] = { 893static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
1537 .driver_data = atkbd_hp_zv6100_forced_release_keys, 1545 .driver_data = atkbd_hp_zv6100_forced_release_keys,
1538 }, 1546 },
1539 { 1547 {
1548 .ident = "HP Presario R4000",
1549 .matches = {
1550 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1551 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
1552 },
1553 .callback = atkbd_setup_forced_release,
1554 .driver_data = atkbd_hp_r4000_forced_release_keys,
1555 },
1556 {
1557 .ident = "HP Presario R4100",
1558 .matches = {
1559 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1560 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
1561 },
1562 .callback = atkbd_setup_forced_release,
1563 .driver_data = atkbd_hp_r4000_forced_release_keys,
1564 },
1565 {
1566 .ident = "HP Presario R4200",
1567 .matches = {
1568 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
1569 DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
1570 },
1571 .callback = atkbd_setup_forced_release,
1572 .driver_data = atkbd_hp_r4000_forced_release_keys,
1573 },
1574 {
1540 .ident = "Inventec Symphony", 1575 .ident = "Inventec Symphony",
1541 .matches = { 1576 .matches = {
1542 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), 1577 DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ae04d8a494e5..ccbf23ece8e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"), 382 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
383 }, 383 },
384 }, 384 },
385 {
386 .ident = "Acer Aspire 5536",
387 .matches = {
388 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
389 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
390 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
391 },
392 },
385 { } 393 { }
386}; 394};
387 395
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index a9d5031b855e..ea30c983a33e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -388,6 +388,32 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
388 return result; 388 return result;
389} 389}
390 390
391static int wacom_query_tablet_data(struct usb_interface *intf)
392{
393 unsigned char *rep_data;
394 int limit = 0;
395 int error;
396
397 rep_data = kmalloc(2, GFP_KERNEL);
398 if (!rep_data)
399 return -ENOMEM;
400
401 do {
402 rep_data[0] = 2;
403 rep_data[1] = 2;
404 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
405 2, rep_data, 2);
406 if (error >= 0)
407 error = usb_get_report(intf,
408 WAC_HID_FEATURE_REPORT, 2,
409 rep_data, 2);
410 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
411
412 kfree(rep_data);
413
414 return error < 0 ? error : 0;
415}
416
391static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id) 417static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
392{ 418{
393 struct usb_device *dev = interface_to_usbdev(intf); 419 struct usb_device *dev = interface_to_usbdev(intf);
@@ -398,7 +424,6 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
398 struct wacom_features *features; 424 struct wacom_features *features;
399 struct input_dev *input_dev; 425 struct input_dev *input_dev;
400 int error = -ENOMEM; 426 int error = -ENOMEM;
401 char rep_data[2], limit = 0;
402 struct hid_descriptor *hid_desc; 427 struct hid_descriptor *hid_desc;
403 428
404 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL); 429 wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
@@ -489,20 +514,10 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
489 514
490 /* 515 /*
491 * Ask the tablet to report tablet data if it is not a Tablet PC. 516 * Ask the tablet to report tablet data if it is not a Tablet PC.
492 * Repeat until it succeeds 517 * Note that if query fails it is not a hard failure.
493 */ 518 */
494 if (wacom_wac->features->type != TABLETPC) { 519 if (wacom_wac->features->type != TABLETPC)
495 do { 520 wacom_query_tablet_data(intf);
496 rep_data[0] = 2;
497 rep_data[1] = 2;
498 error = usb_set_report(intf, WAC_HID_FEATURE_REPORT,
499 2, rep_data, 2);
500 if (error >= 0)
501 error = usb_get_report(intf,
502 WAC_HID_FEATURE_REPORT, 2,
503 rep_data, 2);
504 } while ((error < 0 || rep_data[1] != 2) && limit++ < 5);
505 }
506 521
507 usb_set_intfdata(intf, wacom); 522 usb_set_intfdata(intf, wacom);
508 return 0; 523 return 0;
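The new wacom_query_tablet_data() is a bounded retry: send the feature report asking for report mode 2, read it back, and stop either when the tablet echoes that mode or after a handful of attempts, with any failure treated as soft. A sketch of that control flow with the USB transfers stubbed out:

#include <stdio.h>

#define WANTED_MODE 2
#define MAX_TRIES   5

/* Stub standing in for usb_set_report()/usb_get_report(); it pretends the
 * tablet only switches into the wanted mode on the third attempt. */
static int query_device(int attempt, unsigned char rep_data[2])
{
	rep_data[0] = 2;
	rep_data[1] = (attempt >= 2) ? WANTED_MODE : 0;
	return 0;			/* >= 0 means the transfer itself worked */
}

static int query_tablet_data(void)
{
	unsigned char rep_data[2] = { 0, 0 };
	int limit = 0, error;

	do {
		error = query_device(limit, rep_data);
	} while ((error < 0 || rep_data[1] != WANTED_MODE) && limit++ < MAX_TRIES);

	return error < 0 ? error : 0;	/* soft failure: the caller ignores it */
}

int main(void)
{
	printf("query_tablet_data() -> %d\n", query_tablet_data());
	return 0;
}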
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 6954f5500108..3a7a58222f83 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -170,11 +170,11 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
170 ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr); 170 ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr);
171 ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0); 171 ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
172 172
173 if (isr & UCB_IE_TSPX) { 173 if (isr & UCB_IE_TSPX)
174 ucb1400_ts_irq_disable(ucb->ac97); 174 ucb1400_ts_irq_disable(ucb->ac97);
175 enable_irq(ucb->irq); 175 else
176 } else 176 dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
177 printk(KERN_ERR "ucb1400: unexpected IE_STATUS = %#x\n", isr); 177 enable_irq(ucb->irq);
178} 178}
179 179
180static int ucb1400_ts_thread(void *_ucb) 180static int ucb1400_ts_thread(void *_ucb)
@@ -345,6 +345,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
345static int ucb1400_ts_probe(struct platform_device *dev) 345static int ucb1400_ts_probe(struct platform_device *dev)
346{ 346{
347 int error, x_res, y_res; 347 int error, x_res, y_res;
348 u16 fcsr;
348 struct ucb1400_ts *ucb = dev->dev.platform_data; 349 struct ucb1400_ts *ucb = dev->dev.platform_data;
349 350
350 ucb->ts_idev = input_allocate_device(); 351 ucb->ts_idev = input_allocate_device();
@@ -382,6 +383,14 @@ static int ucb1400_ts_probe(struct platform_device *dev)
382 ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); 383 ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
383 ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); 384 ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
384 385
386 /*
387 * Enable ADC filter to prevent horrible jitter on Colibri.
388 * This also further reduces jitter on boards where ADCSYNC
389 * pin is connected.
390 */
391 fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR);
392 ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE);
393
385 ucb1400_adc_enable(ucb->ac97); 394 ucb1400_adc_enable(ucb->ac97);
386 x_res = ucb1400_ts_read_xres(ucb); 395 x_res = ucb1400_ts_read_xres(ucb);
387 y_res = ucb1400_ts_read_yres(ucb); 396 y_res = ucb1400_ts_read_yres(ucb);
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index a247ae63374f..1bc5db4ece0d 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -117,6 +117,9 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
117 117
118 gpio_data->inverted = !!inverted; 118 gpio_data->inverted = !!inverted;
119 119
120 /* After inverting, we need to update the LED. */
121 schedule_work(&gpio_data->work);
122
120 return n; 123 return n;
121} 124}
122static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show, 125static DEVICE_ATTR(inverted, 0644, gpio_trig_inverted_show,
@@ -146,20 +149,26 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
146 return -EINVAL; 149 return -EINVAL;
147 } 150 }
148 151
152 if (gpio_data->gpio == gpio)
153 return n;
154
149 if (!gpio) { 155 if (!gpio) {
150 free_irq(gpio_to_irq(gpio_data->gpio), led); 156 if (gpio_data->gpio != 0)
157 free_irq(gpio_to_irq(gpio_data->gpio), led);
158 gpio_data->gpio = 0;
151 return n; 159 return n;
152 } 160 }
153 161
154 if (gpio_data->gpio > 0 && gpio_data->gpio != gpio)
155 free_irq(gpio_to_irq(gpio_data->gpio), led);
156
157 gpio_data->gpio = gpio;
158 ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq, 162 ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
159 IRQF_SHARED | IRQF_TRIGGER_RISING 163 IRQF_SHARED | IRQF_TRIGGER_RISING
160 | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led); 164 | IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
161 if (ret) 165 if (ret) {
162 dev_err(dev, "request_irq failed with error %d\n", ret); 166 dev_err(dev, "request_irq failed with error %d\n", ret);
167 } else {
168 if (gpio_data->gpio != 0)
169 free_irq(gpio_to_irq(gpio_data->gpio), led);
170 gpio_data->gpio = gpio;
171 }
163 172
164 return ret ? ret : n; 173 return ret ? ret : n;
165} 174}
@@ -211,7 +220,8 @@ static void gpio_trig_deactivate(struct led_classdev *led)
211 device_remove_file(led->dev, &dev_attr_inverted); 220 device_remove_file(led->dev, &dev_attr_inverted);
212 device_remove_file(led->dev, &dev_attr_desired_brightness); 221 device_remove_file(led->dev, &dev_attr_desired_brightness);
213 flush_work(&gpio_data->work); 222 flush_work(&gpio_data->work);
214 free_irq(gpio_to_irq(gpio_data->gpio),led); 223 if (gpio_data->gpio != 0)
224 free_irq(gpio_to_irq(gpio_data->gpio), led);
215 kfree(gpio_data); 225 kfree(gpio_data);
216 } 226 }
217} 227}
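The gpio store handler above now acquires before it releases: the new IRQ is requested first, and only on success is the old one freed and the stored gpio updated, with gpio 0 meaning no IRQ is held at all. A small sketch of that ordering with the request/free calls stubbed out:

#include <stdio.h>

static int request_irq_stub(unsigned gpio)
{
	return gpio == 13 ? -16 : 0;	/* pretend gpio 13 is busy (-EBUSY) */
}

static void free_irq_stub(unsigned gpio)
{
	printf("freed irq for gpio %u\n", gpio);
}

/* Nothing is torn down until the new IRQ is known to be available. */
static int set_trigger_gpio(unsigned *cur_gpio, unsigned new_gpio)
{
	int ret;

	if (*cur_gpio == new_gpio)
		return 0;

	if (!new_gpio) {
		if (*cur_gpio != 0)
			free_irq_stub(*cur_gpio);
		*cur_gpio = 0;
		return 0;
	}

	ret = request_irq_stub(new_gpio);
	if (ret)
		return ret;		/* old IRQ, if any, stays in place */

	if (*cur_gpio != 0)
		free_irq_stub(*cur_gpio);
	*cur_gpio = new_gpio;
	return 0;
}

int main(void)
{
	unsigned gpio = 0;

	set_trigger_gpio(&gpio, 7);
	set_trigger_gpio(&gpio, 13);	/* fails; gpio 7 stays active */
	printf("active gpio: %u\n", gpio);
	return 0;
}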
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c
index 4d686c0bdea0..9ab5b0c34f0d 100644
--- a/drivers/macintosh/via-maciisi.c
+++ b/drivers/macintosh/via-maciisi.c
@@ -288,7 +288,7 @@ static void maciisi_sync(struct adb_request *req)
288 } 288 }
289 /* This could be BAD... when the ADB controller doesn't respond 289 /* This could be BAD... when the ADB controller doesn't respond
290 * for this long, it's probably not coming back :-( */ 290 * for this long, it's probably not coming back :-( */
291 if(count >= 50) /* Hopefully shouldn't happen */ 291 if (count > 50) /* Hopefully shouldn't happen */
292 printk(KERN_ERR "maciisi_send_request: poll timed out!\n"); 292 printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
293} 293}
294 294
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc10..556acff3952f 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
171 */ 171 */
172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); 172 chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
173 173
174 return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
175 error);
176}
177
178int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
179 unsigned long chunk_size_ulong,
180 char **error)
181{
174 /* Check chunk_size is a power of 2 */ 182 /* Check chunk_size is a power of 2 */
175 if (!is_power_of_2(chunk_size_ulong)) { 183 if (!is_power_of_2(chunk_size_ulong)) {
176 *error = "Chunk size is not a power of 2"; 184 *error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
183 return -EINVAL; 191 return -EINVAL;
184 } 192 }
185 193
194 if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
195 *error = "Chunk size is too high";
196 return -EINVAL;
197 }
198
186 store->chunk_size = chunk_size_ulong; 199 store->chunk_size = chunk_size_ulong;
187 store->chunk_mask = chunk_size_ulong - 1; 200 store->chunk_mask = chunk_size_ulong - 1;
188 store->chunk_shift = ffs(chunk_size_ulong) - 1; 201 store->chunk_shift = ffs(chunk_size_ulong) - 1;
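dm_exception_store_set_chunk_size() now carries the sanity checks itself: the chunk size, counted in 512-byte sectors, must be a power of two and small enough that the equivalent byte count still fits in an int. A standalone sketch of those checks (SECTOR_SHIFT is 9 for 512-byte sectors):

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors */

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Same validation order as the patched helper: power of two first, then
 * the overflow guard against INT_MAX bytes. */
static int check_chunk_size(unsigned long chunk_sectors, const char **error)
{
	if (!is_power_of_2(chunk_sectors)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}
	if (chunk_sectors > (unsigned long)(INT_MAX >> SECTOR_SHIFT)) {
		*error = "Chunk size is too high";
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	const char *err = "none";
	int rc;

	rc = check_chunk_size(8, &err);
	printf("8 sectors: %d\n", rc);
	rc = check_chunk_size(5, &err);
	printf("5 sectors: %d (%s)\n", rc, err);
	return 0;
}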
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c07898..812c71872ba0 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
168int dm_exception_store_type_register(struct dm_exception_store_type *type); 168int dm_exception_store_type_register(struct dm_exception_store_type *type);
169int dm_exception_store_type_unregister(struct dm_exception_store_type *type); 169int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
170 170
171int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
172 unsigned long chunk_size_ulong,
173 char **error);
174
171int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, 175int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
172 unsigned *args_used, 176 unsigned *args_used,
173 struct dm_exception_store **store); 177 struct dm_exception_store **store);
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b96560997..6e186b1a062d 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
21 struct dm_target *ti; 21 struct dm_target *ti;
22 uint32_t region_size; 22 uint32_t region_size;
23 region_t region_count; 23 region_t region_count;
24 uint64_t luid;
24 char uuid[DM_UUID_LEN]; 25 char uuid[DM_UUID_LEN];
25 26
26 char *usr_argv_str; 27 char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
63 * restored. 64 * restored.
64 */ 65 */
65retry: 66retry:
66 r = dm_consult_userspace(uuid, request_type, data, 67 r = dm_consult_userspace(uuid, lc->luid, request_type, data,
67 data_size, rdata, rdata_size); 68 data_size, rdata, rdata_size);
68 69
69 if (r != -ESRCH) 70 if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
74 set_current_state(TASK_INTERRUPTIBLE); 75 set_current_state(TASK_INTERRUPTIBLE);
75 schedule_timeout(2*HZ); 76 schedule_timeout(2*HZ);
76 DMWARN("Attempting to contact userspace log server..."); 77 DMWARN("Attempting to contact userspace log server...");
77 r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str, 78 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
79 lc->usr_argv_str,
78 strlen(lc->usr_argv_str) + 1, 80 strlen(lc->usr_argv_str) + 1,
79 NULL, NULL); 81 NULL, NULL);
80 if (!r) 82 if (!r)
81 break; 83 break;
82 } 84 }
83 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete"); 85 DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
84 r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL, 86 r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
85 0, NULL, NULL); 87 0, NULL, NULL);
86 if (!r) 88 if (!r)
87 goto retry; 89 goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
111 return -ENOMEM; 113 return -ENOMEM;
112 } 114 }
113 115
114 for (i = 0, str_size = 0; i < argc; i++) 116 str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
115 str_size += sprintf(str + str_size, "%s ", argv[i]); 117 for (i = 0; i < argc; i++)
116 str_size += sprintf(str + str_size, "%llu", 118 str_size += sprintf(str + str_size, " %s", argv[i]);
117 (unsigned long long)ti->len);
118 119
119 *ctr_str = str; 120 *ctr_str = str;
120 return str_size; 121 return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
154 return -ENOMEM; 155 return -ENOMEM;
155 } 156 }
156 157
158 /* The ptr value is sufficient for local unique id */
159 lc->luid = (uint64_t)lc;
160
157 lc->ti = ti; 161 lc->ti = ti;
158 162
159 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) { 163 if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
173 } 177 }
174 178
175 /* Send table string */ 179 /* Send table string */
176 r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR, 180 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
177 ctr_str, str_size, NULL, NULL); 181 ctr_str, str_size, NULL, NULL);
178 182
179 if (r == -ESRCH) { 183 if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
183 187
184 /* Since the region size does not change, get it now */ 188 /* Since the region size does not change, get it now */
185 rdata_size = sizeof(rdata); 189 rdata_size = sizeof(rdata);
186 r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE, 190 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
187 NULL, 0, (char *)&rdata, &rdata_size); 191 NULL, 0, (char *)&rdata, &rdata_size);
188 192
189 if (r) { 193 if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
212 int r; 216 int r;
213 struct log_c *lc = log->context; 217 struct log_c *lc = log->context;
214 218
215 r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR, 219 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
216 NULL, 0, 220 NULL, 0,
217 NULL, NULL); 221 NULL, NULL);
218 222
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
227 int r; 231 int r;
228 struct log_c *lc = log->context; 232 struct log_c *lc = log->context;
229 233
230 r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND, 234 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
231 NULL, 0, 235 NULL, 0,
232 NULL, NULL); 236 NULL, NULL);
233 237
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
239 int r; 243 int r;
240 struct log_c *lc = log->context; 244 struct log_c *lc = log->context;
241 245
242 r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND, 246 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
243 NULL, 0, 247 NULL, 0,
244 NULL, NULL); 248 NULL, NULL);
245 249
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
252 struct log_c *lc = log->context; 256 struct log_c *lc = log->context;
253 257
254 lc->in_sync_hint = 0; 258 lc->in_sync_hint = 0;
255 r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME, 259 r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
256 NULL, 0, 260 NULL, 0,
257 NULL, NULL); 261 NULL, NULL);
258 262
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
561 char *result, unsigned maxlen) 565 char *result, unsigned maxlen)
562{ 566{
563 int r = 0; 567 int r = 0;
568 char *table_args;
564 size_t sz = (size_t)maxlen; 569 size_t sz = (size_t)maxlen;
565 struct log_c *lc = log->context; 570 struct log_c *lc = log->context;
566 571
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
577 break; 582 break;
578 case STATUSTYPE_TABLE: 583 case STATUSTYPE_TABLE:
579 sz = 0; 584 sz = 0;
580 DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1, 585 table_args = strstr(lc->usr_argv_str, " ");
581 lc->uuid, lc->usr_argv_str); 586 BUG_ON(!table_args); /* There will always be a ' ' */
587 table_args++;
588
589 DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
590 lc->uuid, table_args);
582 break; 591 break;
583 } 592 }
584 return (r) ? 0 : (int)sz; 593 return (r) ? 0 : (int)sz;
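The dm-log-userspace-base.c hunks above do two things: they introduce a per-instance local unique id (lc->luid, taken from the pointer value of the log context) that is passed on every dm_consult_userspace() call, and they move the target length to the front of the constructor string so the STATUSTYPE_TABLE path can strip it again by skipping to the first space. A minimal sketch of the new string layout, assuming a caller-supplied buffer already sized large enough (function and variable names here are illustrative, not the kernel's):

    static size_t build_ctr_string(unsigned long long ti_len,
                                   int argc, char **argv, char *str)
    {
            size_t sz;
            int i;

            /* device length goes first ... */
            sz = sprintf(str, "%llu", ti_len);

            /* ... followed by the original constructor arguments */
            for (i = 0; i < argc; i++)
                    sz += sprintf(str + sz, " %s", argv[i]);

            return sz;      /* "<len> <arg0> <arg1> ..." */
    }

With that layout, the status path can report just the original arguments by pointing past the first space (table_args = strstr(str, " ") + 1), which is exactly what the STATUSTYPE_TABLE hunk does.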
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 8ce74d95ae4d..ba0edad2d048 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
147 147
148/** 148/**
149 * dm_consult_userspace 149 * dm_consult_userspace
150 * @uuid: log's uuid (must be DM_UUID_LEN in size) 150 * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
151 * @luid: log's local unique identifier
151 * @request_type: found in include/linux/dm-log-userspace.h 152 * @request_type: found in include/linux/dm-log-userspace.h
152 * @data: data to tx to the server 153 * @data: data to tx to the server
153 * @data_size: size of data in bytes 154 * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
163 * 164 *
164 * Returns: 0 on success, -EXXX on failure 165 * Returns: 0 on success, -EXXX on failure
165 **/ 166 **/
166int dm_consult_userspace(const char *uuid, int request_type, 167int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
167 char *data, size_t data_size, 168 char *data, size_t data_size,
168 char *rdata, size_t *rdata_size) 169 char *rdata, size_t *rdata_size)
169{ 170{
@@ -190,6 +191,7 @@ resend:
190 191
191 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size); 192 memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
192 memcpy(tfr->uuid, uuid, DM_UUID_LEN); 193 memcpy(tfr->uuid, uuid, DM_UUID_LEN);
194 tfr->luid = luid;
193 tfr->seq = dm_ulog_seq++; 195 tfr->seq = dm_ulog_seq++;
194 196
195 /* 197 /*
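With the extra parameter, every request frame now carries both the uuid (which names the log) and the luid (which disambiguates multiple log instances built from identical table lines). A hedged sketch of a caller after the signature change; the request type shown is just one of the DM_ULOG_* constants and the surrounding variables are assumed:

    rdata_size = sizeof(sync_count);
    r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_SYNC_COUNT,
                             NULL, 0, (char *)&sync_count, &rdata_size);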
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e2710..04ee874f9153 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
11 11
12int dm_ulog_tfr_init(void); 12int dm_ulog_tfr_init(void);
13void dm_ulog_tfr_exit(void); 13void dm_ulog_tfr_exit(void);
14int dm_consult_userspace(const char *uuid, int request_type, 14int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
15 char *data, size_t data_size, 15 char *data, size_t data_size,
16 char *rdata, size_t *rdata_size); 16 char *rdata, size_t *rdata_size);
17 17
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde49..33f179e66bf5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
648 */ 648 */
649 dm_rh_inc_pending(ms->rh, &sync); 649 dm_rh_inc_pending(ms->rh, &sync);
650 dm_rh_inc_pending(ms->rh, &nosync); 650 dm_rh_inc_pending(ms->rh, &nosync);
651 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; 651
652 /*
653 * If the flush fails on a previous call and succeeds here,
654 * we must not reset the log_failure variable. We need
655 * userspace interaction to do that.
656 */
657 ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
652 658
653 /* 659 /*
654 * Dispatch io. 660 * Dispatch io.
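The dm-raid1.c change makes the mirror's log_failure flag sticky: a later successful flush no longer clears a failure that userspace has not yet acknowledged. Expressed as an if-statement rather than the ternary above (a sketch, not the kernel code):

    if (dm_rh_flush(ms->rh))
            ms->log_failure = 1;
    /* on success, deliberately leave ms->log_failure alone; only
     * userspace intervention may reset it */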
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f14934..d5b2e08750d5 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
106 void *zero_area; 106 void *zero_area;
107 107
108 /* 108 /*
109 * An area used for header. The header can be written
110 * concurrently with metadata (when invalidating the snapshot),
111 * so it needs a separate buffer.
112 */
113 void *header_area;
114
115 /*
109 * Used to keep track of which metadata area the data in 116 * Used to keep track of which metadata area the data in
110 * 'chunk' refers to. 117 * 'chunk' refers to.
111 */ 118 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
148 */ 155 */
149 ps->area = vmalloc(len); 156 ps->area = vmalloc(len);
150 if (!ps->area) 157 if (!ps->area)
151 return r; 158 goto err_area;
152 159
153 ps->zero_area = vmalloc(len); 160 ps->zero_area = vmalloc(len);
154 if (!ps->zero_area) { 161 if (!ps->zero_area)
155 vfree(ps->area); 162 goto err_zero_area;
156 return r;
157 }
158 memset(ps->zero_area, 0, len); 163 memset(ps->zero_area, 0, len);
159 164
165 ps->header_area = vmalloc(len);
166 if (!ps->header_area)
167 goto err_header_area;
168
160 return 0; 169 return 0;
170
171err_header_area:
172 vfree(ps->zero_area);
173
174err_zero_area:
175 vfree(ps->area);
176
177err_area:
178 return r;
161} 179}
162 180
163static void free_area(struct pstore *ps) 181static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
169 if (ps->zero_area) 187 if (ps->zero_area)
170 vfree(ps->zero_area); 188 vfree(ps->zero_area);
171 ps->zero_area = NULL; 189 ps->zero_area = NULL;
190
191 if (ps->header_area)
192 vfree(ps->header_area);
193 ps->header_area = NULL;
172} 194}
173 195
174struct mdata_req { 196struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
188/* 210/*
189 * Read or write a chunk aligned and sized block of data from a device. 211 * Read or write a chunk aligned and sized block of data from a device.
190 */ 212 */
191static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata) 213static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
214 int metadata)
192{ 215{
193 struct dm_io_region where = { 216 struct dm_io_region where = {
194 .bdev = ps->store->cow->bdev, 217 .bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
198 struct dm_io_request io_req = { 221 struct dm_io_request io_req = {
199 .bi_rw = rw, 222 .bi_rw = rw,
200 .mem.type = DM_IO_VMA, 223 .mem.type = DM_IO_VMA,
201 .mem.ptr.vma = ps->area, 224 .mem.ptr.vma = area,
202 .client = ps->io_client, 225 .client = ps->io_client,
203 .notify.fn = NULL, 226 .notify.fn = NULL,
204 }; 227 };
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
240 263
241 chunk = area_location(ps, ps->current_area); 264 chunk = area_location(ps, ps->current_area);
242 265
243 r = chunk_io(ps, chunk, rw, 0); 266 r = chunk_io(ps, ps->area, chunk, rw, 0);
244 if (r) 267 if (r)
245 return r; 268 return r;
246 269
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
254 277
255static int zero_disk_area(struct pstore *ps, chunk_t area) 278static int zero_disk_area(struct pstore *ps, chunk_t area)
256{ 279{
257 struct dm_io_region where = { 280 return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
258 .bdev = ps->store->cow->bdev,
259 .sector = ps->store->chunk_size * area_location(ps, area),
260 .count = ps->store->chunk_size,
261 };
262 struct dm_io_request io_req = {
263 .bi_rw = WRITE,
264 .mem.type = DM_IO_VMA,
265 .mem.ptr.vma = ps->zero_area,
266 .client = ps->io_client,
267 .notify.fn = NULL,
268 };
269
270 return dm_io(&io_req, 1, &where, NULL);
271} 281}
272 282
273static int read_header(struct pstore *ps, int *new_snapshot) 283static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
276 struct disk_header *dh; 286 struct disk_header *dh;
277 chunk_t chunk_size; 287 chunk_t chunk_size;
278 int chunk_size_supplied = 1; 288 int chunk_size_supplied = 1;
289 char *chunk_err;
279 290
280 /* 291 /*
281 * Use default chunk size (or hardsect_size, if larger) if none supplied 292 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
297 if (r) 308 if (r)
298 return r; 309 return r;
299 310
300 r = chunk_io(ps, 0, READ, 1); 311 r = chunk_io(ps, ps->header_area, 0, READ, 1);
301 if (r) 312 if (r)
302 goto bad; 313 goto bad;
303 314
304 dh = (struct disk_header *) ps->area; 315 dh = ps->header_area;
305 316
306 if (le32_to_cpu(dh->magic) == 0) { 317 if (le32_to_cpu(dh->magic) == 0) {
307 *new_snapshot = 1; 318 *new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
319 ps->version = le32_to_cpu(dh->version); 330 ps->version = le32_to_cpu(dh->version);
320 chunk_size = le32_to_cpu(dh->chunk_size); 331 chunk_size = le32_to_cpu(dh->chunk_size);
321 332
322 if (!chunk_size_supplied || ps->store->chunk_size == chunk_size) 333 if (ps->store->chunk_size == chunk_size)
323 return 0; 334 return 0;
324 335
325 DMWARN("chunk size %llu in device metadata overrides " 336 if (chunk_size_supplied)
326 "table chunk size of %llu.", 337 DMWARN("chunk size %llu in device metadata overrides "
327 (unsigned long long)chunk_size, 338 "table chunk size of %llu.",
328 (unsigned long long)ps->store->chunk_size); 339 (unsigned long long)chunk_size,
340 (unsigned long long)ps->store->chunk_size);
329 341
330 /* We had a bogus chunk_size. Fix stuff up. */ 342 /* We had a bogus chunk_size. Fix stuff up. */
331 free_area(ps); 343 free_area(ps);
332 344
333 ps->store->chunk_size = chunk_size; 345 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
334 ps->store->chunk_mask = chunk_size - 1; 346 &chunk_err);
335 ps->store->chunk_shift = ffs(chunk_size) - 1; 347 if (r) {
348 DMERR("invalid on-disk chunk size %llu: %s.",
349 (unsigned long long)chunk_size, chunk_err);
350 return r;
351 }
336 352
337 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size), 353 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
338 ps->io_client); 354 ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
351{ 367{
352 struct disk_header *dh; 368 struct disk_header *dh;
353 369
354 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT); 370 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
355 371
356 dh = (struct disk_header *) ps->area; 372 dh = ps->header_area;
357 dh->magic = cpu_to_le32(SNAP_MAGIC); 373 dh->magic = cpu_to_le32(SNAP_MAGIC);
358 dh->valid = cpu_to_le32(ps->valid); 374 dh->valid = cpu_to_le32(ps->valid);
359 dh->version = cpu_to_le32(ps->version); 375 dh->version = cpu_to_le32(ps->version);
360 dh->chunk_size = cpu_to_le32(ps->store->chunk_size); 376 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
361 377
362 return chunk_io(ps, 0, WRITE, 1); 378 return chunk_io(ps, ps->header_area, 0, WRITE, 1);
363} 379}
364 380
365/* 381/*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
679 ps->valid = 1; 695 ps->valid = 1;
680 ps->version = SNAPSHOT_DISK_VERSION; 696 ps->version = SNAPSHOT_DISK_VERSION;
681 ps->area = NULL; 697 ps->area = NULL;
698 ps->zero_area = NULL;
699 ps->header_area = NULL;
682 ps->next_free = 2; /* skipping the header and first area */ 700 ps->next_free = 2; /* skipping the header and first area */
683 ps->current_committed = 0; 701 ps->current_committed = 0;
684 702
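The persistent-snapshot changes give the header its own buffer so that a header write (for example while invalidating the snapshot) cannot race with metadata-area I/O through the shared ps->area, and chunk_io() now takes the buffer explicitly. Illustrative callers after the change, mirroring the hunks above:

    /* header (metadata flag set, chunk 0) */
    r = chunk_io(ps, ps->header_area, 0, WRITE, 1);

    /* current metadata area */
    r = chunk_io(ps, ps->area, area_location(ps, ps->current_area), rw, 0);

    /* zeroing a fresh area on disk */
    r = chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);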
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b7..57f1bf7f3b7a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
1176 return 0; 1176 return 0;
1177} 1177}
1178 1178
1179static int snapshot_iterate_devices(struct dm_target *ti,
1180 iterate_devices_callout_fn fn, void *data)
1181{
1182 struct dm_snapshot *snap = ti->private;
1183
1184 return fn(ti, snap->origin, 0, ti->len, data);
1185}
1186
1187
1179/*----------------------------------------------------------------- 1188/*-----------------------------------------------------------------
1180 * Origin methods 1189 * Origin methods
1181 *---------------------------------------------------------------*/ 1190 *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1410 return 0; 1419 return 0;
1411} 1420}
1412 1421
1422static int origin_iterate_devices(struct dm_target *ti,
1423 iterate_devices_callout_fn fn, void *data)
1424{
1425 struct dm_dev *dev = ti->private;
1426
1427 return fn(ti, dev, 0, ti->len, data);
1428}
1429
1413static struct target_type origin_target = { 1430static struct target_type origin_target = {
1414 .name = "snapshot-origin", 1431 .name = "snapshot-origin",
1415 .version = {1, 6, 0}, 1432 .version = {1, 7, 0},
1416 .module = THIS_MODULE, 1433 .module = THIS_MODULE,
1417 .ctr = origin_ctr, 1434 .ctr = origin_ctr,
1418 .dtr = origin_dtr, 1435 .dtr = origin_dtr,
1419 .map = origin_map, 1436 .map = origin_map,
1420 .resume = origin_resume, 1437 .resume = origin_resume,
1421 .status = origin_status, 1438 .status = origin_status,
1439 .iterate_devices = origin_iterate_devices,
1422}; 1440};
1423 1441
1424static struct target_type snapshot_target = { 1442static struct target_type snapshot_target = {
1425 .name = "snapshot", 1443 .name = "snapshot",
1426 .version = {1, 6, 0}, 1444 .version = {1, 7, 0},
1427 .module = THIS_MODULE, 1445 .module = THIS_MODULE,
1428 .ctr = snapshot_ctr, 1446 .ctr = snapshot_ctr,
1429 .dtr = snapshot_dtr, 1447 .dtr = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
1431 .end_io = snapshot_end_io, 1449 .end_io = snapshot_end_io,
1432 .resume = snapshot_resume, 1450 .resume = snapshot_resume,
1433 .status = snapshot_status, 1451 .status = snapshot_status,
1452 .iterate_devices = snapshot_iterate_devices,
1434}; 1453};
1435 1454
1436static int __init dm_snapshot_init(void) 1455static int __init dm_snapshot_init(void)
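Both snapshot targets gain an .iterate_devices hook (and bump their versions to 1.7.0). The device-mapper core calls the hook with a callout function so it can stack queue limits and validate every underlying device; a simplified sketch of how such a callout might be driven (callout name and counter are illustrative):

    static int count_devices(struct dm_target *ti, struct dm_dev *dev,
                             sector_t start, sector_t len, void *data)
    {
            unsigned *count = data;

            (*count)++;
            return 0;       /* a non-zero return aborts the iteration */
    }

    /* e.g. inside the core's table scan:
     *      ti->type->iterate_devices(ti, count_devices, &n);
     */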
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42a..3e563d251733 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
329 return ret; 329 return ret;
330} 330}
331 331
332static void stripe_io_hints(struct dm_target *ti,
333 struct queue_limits *limits)
334{
335 struct stripe_c *sc = ti->private;
336 unsigned chunk_size = (sc->chunk_mask + 1) << 9;
337
338 blk_limits_io_min(limits, chunk_size);
339 limits->io_opt = chunk_size * sc->stripes;
340}
341
332static struct target_type stripe_target = { 342static struct target_type stripe_target = {
333 .name = "striped", 343 .name = "striped",
334 .version = {1, 2, 0}, 344 .version = {1, 3, 0},
335 .module = THIS_MODULE, 345 .module = THIS_MODULE,
336 .ctr = stripe_ctr, 346 .ctr = stripe_ctr,
337 .dtr = stripe_dtr, 347 .dtr = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
339 .end_io = stripe_end_io, 349 .end_io = stripe_end_io,
340 .status = stripe_status, 350 .status = stripe_status,
341 .iterate_devices = stripe_iterate_devices, 351 .iterate_devices = stripe_iterate_devices,
352 .io_hints = stripe_io_hints,
342}; 353};
343 354
344int __init dm_stripe_init(void) 355int __init dm_stripe_init(void)
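The new stripe_io_hints() hook exports the stripe geometry as I/O hints: the minimum I/O size is one chunk and the optimal I/O size is one full stripe across all members. A worked example with assumed values:

    /* assumed geometry: 4 stripes of 128-sector (64 KiB) chunks */
    sc->stripes    = 4;
    sc->chunk_mask = 128 - 1;                      /* chunk_size - 1, in sectors */
    chunk_size     = (sc->chunk_mask + 1) << 9;    /* 128 << 9 = 65536 bytes     */
    io_min         = chunk_size;                   /* 64 KiB                     */
    io_opt         = chunk_size * sc->stripes;     /* 256 KiB                    */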
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b3441913..1a6cb3c7822e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
343} 343}
344 344
345/* 345/*
346 * If possible, this checks an area of a destination device is valid. 346 * If possible, this checks an area of a destination device is invalid.
347 */ 347 */
348static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev, 348static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
349 sector_t start, sector_t len, void *data) 349 sector_t start, sector_t len, void *data)
350{ 350{
351 struct queue_limits *limits = data; 351 struct queue_limits *limits = data;
352 struct block_device *bdev = dev->bdev; 352 struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
357 char b[BDEVNAME_SIZE]; 357 char b[BDEVNAME_SIZE];
358 358
359 if (!dev_size) 359 if (!dev_size)
360 return 1; 360 return 0;
361 361
362 if ((start >= dev_size) || (start + len > dev_size)) { 362 if ((start >= dev_size) || (start + len > dev_size)) {
363 DMWARN("%s: %s too small for target", 363 DMWARN("%s: %s too small for target: "
364 dm_device_name(ti->table->md), bdevname(bdev, b)); 364 "start=%llu, len=%llu, dev_size=%llu",
365 return 0; 365 dm_device_name(ti->table->md), bdevname(bdev, b),
366 (unsigned long long)start,
367 (unsigned long long)len,
368 (unsigned long long)dev_size);
369 return 1;
366 } 370 }
367 371
368 if (logical_block_size_sectors <= 1) 372 if (logical_block_size_sectors <= 1)
369 return 1; 373 return 0;
370 374
371 if (start & (logical_block_size_sectors - 1)) { 375 if (start & (logical_block_size_sectors - 1)) {
372 DMWARN("%s: start=%llu not aligned to h/w " 376 DMWARN("%s: start=%llu not aligned to h/w "
373 "logical block size %hu of %s", 377 "logical block size %u of %s",
374 dm_device_name(ti->table->md), 378 dm_device_name(ti->table->md),
375 (unsigned long long)start, 379 (unsigned long long)start,
376 limits->logical_block_size, bdevname(bdev, b)); 380 limits->logical_block_size, bdevname(bdev, b));
377 return 0; 381 return 1;
378 } 382 }
379 383
380 if (len & (logical_block_size_sectors - 1)) { 384 if (len & (logical_block_size_sectors - 1)) {
381 DMWARN("%s: len=%llu not aligned to h/w " 385 DMWARN("%s: len=%llu not aligned to h/w "
382 "logical block size %hu of %s", 386 "logical block size %u of %s",
383 dm_device_name(ti->table->md), 387 dm_device_name(ti->table->md),
384 (unsigned long long)len, 388 (unsigned long long)len,
385 limits->logical_block_size, bdevname(bdev, b)); 389 limits->logical_block_size, bdevname(bdev, b));
386 return 0; 390 return 1;
387 } 391 }
388 392
389 return 1; 393 return 0;
390} 394}
391 395
392/* 396/*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
496 } 500 }
497 501
498 if (blk_stack_limits(limits, &q->limits, start << 9) < 0) 502 if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
499 DMWARN("%s: target device %s is misaligned", 503 DMWARN("%s: target device %s is misaligned: "
500 dm_device_name(ti->table->md), bdevname(bdev, b)); 504 "physical_block_size=%u, logical_block_size=%u, "
505 "alignment_offset=%u, start=%llu",
506 dm_device_name(ti->table->md), bdevname(bdev, b),
507 q->limits.physical_block_size,
508 q->limits.logical_block_size,
509 q->limits.alignment_offset,
510 (unsigned long long) start << 9);
511
501 512
502 /* 513 /*
503 * Check if merge fn is supported. 514 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
698 709
699 if (remaining) { 710 if (remaining) {
700 DMWARN("%s: table line %u (start sect %llu len %llu) " 711 DMWARN("%s: table line %u (start sect %llu len %llu) "
701 "not aligned to h/w logical block size %hu", 712 "not aligned to h/w logical block size %u",
702 dm_device_name(table->md), i, 713 dm_device_name(table->md), i,
703 (unsigned long long) ti->begin, 714 (unsigned long long) ti->begin,
704 (unsigned long long) ti->len, 715 (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
996 ti->type->iterate_devices(ti, dm_set_device_limits, 1007 ti->type->iterate_devices(ti, dm_set_device_limits,
997 &ti_limits); 1008 &ti_limits);
998 1009
1010 /* Set I/O hints portion of queue limits */
1011 if (ti->type->io_hints)
1012 ti->type->io_hints(ti, &ti_limits);
1013
999 /* 1014 /*
1000 * Check each device area is consistent with the target's 1015 * Check each device area is consistent with the target's
1001 * overall queue limits. 1016 * overall queue limits.
1002 */ 1017 */
1003 if (!ti->type->iterate_devices(ti, device_area_is_valid, 1018 if (ti->type->iterate_devices(ti, device_area_is_invalid,
1004 &ti_limits)) 1019 &ti_limits))
1005 return -EINVAL; 1020 return -EINVAL;
1006 1021
1007combine_limits: 1022combine_limits:
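device_area_is_valid() becomes device_area_is_invalid() so that a non-zero return from the iterate_devices callout can flag the first failing device, and the warnings now print the offending start/len/size values. The alignment test itself is unchanged; a sketch of it, assuming the logical block size is a power of two expressed in 512-byte sectors:

    unsigned short lbs_sectors = limits->logical_block_size >> SECTOR_SHIFT;

    if (lbs_sectors > 1 && (start & (lbs_sectors - 1)))
            return 1;       /* start not aligned to the logical block size: invalid */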
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8a311ea0d441..b4845b14740d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue)
738 dm_put(md); 738 dm_put(md);
739} 739}
740 740
741static void free_rq_clone(struct request *clone)
742{
743 struct dm_rq_target_io *tio = clone->end_io_data;
744
745 blk_rq_unprep_clone(clone);
746 free_rq_tio(tio);
747}
748
741static void dm_unprep_request(struct request *rq) 749static void dm_unprep_request(struct request *rq)
742{ 750{
743 struct request *clone = rq->special; 751 struct request *clone = rq->special;
744 struct dm_rq_target_io *tio = clone->end_io_data;
745 752
746 rq->special = NULL; 753 rq->special = NULL;
747 rq->cmd_flags &= ~REQ_DONTPREP; 754 rq->cmd_flags &= ~REQ_DONTPREP;
748 755
749 blk_rq_unprep_clone(clone); 756 free_rq_clone(clone);
750 free_rq_tio(tio);
751} 757}
752 758
753/* 759/*
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error)
825 rq->sense_len = clone->sense_len; 831 rq->sense_len = clone->sense_len;
826 } 832 }
827 833
828 BUG_ON(clone->bio); 834 free_rq_clone(clone);
829 free_rq_tio(tio);
830 835
831 blk_end_request_all(rq, error); 836 blk_end_request_all(rq, error);
832 837
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 103f2d33fa89..9dd872000cec 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4364,6 +4364,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4364 if (mode == 1) 4364 if (mode == 1)
4365 set_disk_ro(disk, 1); 4365 set_disk_ro(disk, 1);
4366 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4366 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4367 err = 0;
4367 } 4368 }
4368out: 4369out:
4369 mutex_unlock(&mddev->open_mutex); 4370 mutex_unlock(&mddev->open_mutex);
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index 88847d1dcbb5..8c1aed77ea30 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -2,25 +2,33 @@
2# Siano Mobile Silicon Digital TV device configuration 2# Siano Mobile Silicon Digital TV device configuration
3# 3#
4 4
5config DVB_SIANO_SMS1XXX 5config SMS_SIANO_MDTV
6 tristate "Siano SMS1XXX USB dongle support" 6 tristate "Siano SMS1xxx based MDTV receiver"
7 depends on DVB_CORE && USB && INPUT 7 depends on DVB_CORE && INPUT
8 ---help--- 8 ---help---
9 Choose Y here if you have a USB dongle with a SMS1XXX chipset. 9 Choose Y or M here if you have MDTV receiver with a Siano chipset.
10 10
11 To compile this driver as a module, choose M here: the 11 To compile this driver as a module, choose M here
12 module will be called sms1xxx. 12 (The module will be called smsmdtv).
13 13
14config DVB_SIANO_SMS1XXX_SMS_IDS 14 Further documentation on this driver can be found on the WWW
15 bool "Enable support for Siano Mobile Silicon default USB IDs" 15 at http://www.siano-ms.com/
16 depends on DVB_SIANO_SMS1XXX 16
17 default y 17if SMS_SIANO_MDTV
18 ---help--- 18menu "Siano module components"
19 Choose Y here if you have a USB dongle with a SMS1XXX chipset
20 that uses Siano Mobile Silicon's default usb vid:pid.
21 19
22 Choose N here if you would prefer to use Siano's external driver. 20# Hardware interfaces support
23 21
24 Further documentation on this driver can be found on the WWW at 22config SMS_USB_DRV
25 <http://www.siano-ms.com/>. 23 tristate "USB interface support"
24 depends on DVB_CORE && USB
25 ---help---
26 Choose if you would like to have Siano's support for USB interface
26 27
28config SMS_SDIO_DRV
29 tristate "SDIO interface support"
30 depends on DVB_CORE && MMC
31 ---help---
32 Choose if you would like to have Siano's support for SDIO interface
33endmenu
34endif # SMS_SIANO_MDTV
diff --git a/drivers/media/dvb/siano/Makefile b/drivers/media/dvb/siano/Makefile
index c6644d909433..c54140b5ab5a 100644
--- a/drivers/media/dvb/siano/Makefile
+++ b/drivers/media/dvb/siano/Makefile
@@ -1,8 +1,9 @@
1sms1xxx-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
2 1
3obj-$(CONFIG_DVB_SIANO_SMS1XXX) += sms1xxx.o 2smsmdtv-objs := smscoreapi.o sms-cards.o smsendian.o smsir.o
4obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsusb.o 3
5obj-$(CONFIG_DVB_SIANO_SMS1XXX) += smsdvb.o 4obj-$(CONFIG_SMS_SIANO_MDTV) += smsmdtv.o smsdvb.o
5obj-$(CONFIG_SMS_USB_DRV) += smsusb.o
6obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o
6 7
7EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core 8EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
8 9
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 3ee1c3902c56..266033ae2784 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -325,6 +325,16 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
325 0 : -ETIME; 325 0 : -ETIME;
326} 326}
327 327
328static inline int led_feedback(struct smsdvb_client_t *client)
329{
330 if (client->fe_status & FE_HAS_LOCK)
331 return sms_board_led_feedback(client->coredev,
332 (client->sms_stat_dvb.ReceptionData.BER
333 == 0) ? SMS_LED_HI : SMS_LED_LO);
334 else
335 return sms_board_led_feedback(client->coredev, SMS_LED_OFF);
336}
337
328static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat) 338static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
329{ 339{
330 struct smsdvb_client_t *client; 340 struct smsdvb_client_t *client;
@@ -332,6 +342,8 @@ static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
332 342
333 *stat = client->fe_status; 343 *stat = client->fe_status;
334 344
345 led_feedback(client);
346
335 return 0; 347 return 0;
336} 348}
337 349
@@ -342,6 +354,8 @@ static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber)
342 354
343 *ber = client->sms_stat_dvb.ReceptionData.BER; 355 *ber = client->sms_stat_dvb.ReceptionData.BER;
344 356
357 led_feedback(client);
358
345 return 0; 359 return 0;
346} 360}
347 361
@@ -359,6 +373,8 @@ static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
359 (client->sms_stat_dvb.ReceptionData.InBandPwr 373 (client->sms_stat_dvb.ReceptionData.InBandPwr
360 + 95) * 3 / 2; 374 + 95) * 3 / 2;
361 375
376 led_feedback(client);
377
362 return 0; 378 return 0;
363} 379}
364 380
@@ -369,6 +385,8 @@ static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
369 385
370 *snr = client->sms_stat_dvb.ReceptionData.SNR; 386 *snr = client->sms_stat_dvb.ReceptionData.SNR;
371 387
388 led_feedback(client);
389
372 return 0; 390 return 0;
373} 391}
374 392
@@ -379,6 +397,8 @@ static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
379 397
380 *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets; 398 *ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets;
381 399
400 led_feedback(client);
401
382 return 0; 402 return 0;
383} 403}
384 404
@@ -404,6 +424,8 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
404 u32 Data[3]; 424 u32 Data[3];
405 } Msg; 425 } Msg;
406 426
427 int ret;
428
407 client->fe_status = FE_HAS_SIGNAL; 429 client->fe_status = FE_HAS_SIGNAL;
408 client->event_fe_state = -1; 430 client->event_fe_state = -1;
409 client->event_unc_state = -1; 431 client->event_unc_state = -1;
@@ -426,6 +448,23 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
426 case BANDWIDTH_AUTO: return -EOPNOTSUPP; 448 case BANDWIDTH_AUTO: return -EOPNOTSUPP;
427 default: return -EINVAL; 449 default: return -EINVAL;
428 } 450 }
451 /* Disable LNA, if any. An error is returned if no LNA is present */
452 ret = sms_board_lna_control(client->coredev, 0);
453 if (ret == 0) {
454 fe_status_t status;
455
456 /* tune with LNA off at first */
457 ret = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
458 &client->tune_done);
459
460 smsdvb_read_status(fe, &status);
461
462 if (status & FE_HAS_LOCK)
463 return ret;
464
465 /* previous tune didnt lock - enable LNA and tune again */
466 sms_board_lna_control(client->coredev, 1);
467 }
429 468
430 return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg), 469 return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
431 &client->tune_done); 470 &client->tune_done);
@@ -451,6 +490,8 @@ static int smsdvb_init(struct dvb_frontend *fe)
451 struct smsdvb_client_t *client = 490 struct smsdvb_client_t *client =
452 container_of(fe, struct smsdvb_client_t, frontend); 491 container_of(fe, struct smsdvb_client_t, frontend);
453 492
493 sms_board_power(client->coredev, 1);
494
454 sms_board_dvb3_event(client, DVB3_EVENT_INIT); 495 sms_board_dvb3_event(client, DVB3_EVENT_INIT);
455 return 0; 496 return 0;
456} 497}
@@ -460,6 +501,9 @@ static int smsdvb_sleep(struct dvb_frontend *fe)
460 struct smsdvb_client_t *client = 501 struct smsdvb_client_t *client =
461 container_of(fe, struct smsdvb_client_t, frontend); 502 container_of(fe, struct smsdvb_client_t, frontend);
462 503
504 sms_board_led_feedback(client->coredev, SMS_LED_OFF);
505 sms_board_power(client->coredev, 0);
506
463 sms_board_dvb3_event(client, DVB3_EVENT_SLEEP); 507 sms_board_dvb3_event(client, DVB3_EVENT_SLEEP);
464 508
465 return 0; 509 return 0;
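The smsdvb changes add board power handling in init/sleep, an LNA-off-first tuning retry, and LED feedback from every read_* callback. The nested ternary in led_feedback() reduces to three cases; sketched with an illustrative local variable:

    if (!(client->fe_status & FE_HAS_LOCK))
            led = SMS_LED_OFF;      /* no lock: LED off           */
    else if (client->sms_stat_dvb.ReceptionData.BER == 0)
            led = SMS_LED_HI;       /* locked, error-free signal  */
    else
            led = SMS_LED_LO;       /* locked, but errors present */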
diff --git a/drivers/media/dvb/siano/smssdio.c b/drivers/media/dvb/siano/smssdio.c
index dfaa49a53f32..d1d652e7f890 100644
--- a/drivers/media/dvb/siano/smssdio.c
+++ b/drivers/media/dvb/siano/smssdio.c
@@ -46,6 +46,7 @@
46 46
47#define SMSSDIO_DATA 0x00 47#define SMSSDIO_DATA 0x00
48#define SMSSDIO_INT 0x04 48#define SMSSDIO_INT 0x04
49#define SMSSDIO_BLOCK_SIZE 128
49 50
50static const struct sdio_device_id smssdio_ids[] = { 51static const struct sdio_device_id smssdio_ids[] = {
51 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), 52 {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR),
@@ -85,7 +86,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size)
85 sdio_claim_host(smsdev->func); 86 sdio_claim_host(smsdev->func);
86 87
87 while (size >= smsdev->func->cur_blksize) { 88 while (size >= smsdev->func->cur_blksize) {
88 ret = sdio_write_blocks(smsdev->func, SMSSDIO_DATA, buffer, 1); 89 ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
90 buffer, smsdev->func->cur_blksize);
89 if (ret) 91 if (ret)
90 goto out; 92 goto out;
91 93
@@ -94,8 +96,8 @@ static int smssdio_sendrequest(void *context, void *buffer, size_t size)
94 } 96 }
95 97
96 if (size) { 98 if (size) {
97 ret = sdio_write_bytes(smsdev->func, SMSSDIO_DATA, 99 ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA,
98 buffer, size); 100 buffer, size);
99 } 101 }
100 102
101out: 103out:
@@ -125,23 +127,23 @@ static void smssdio_interrupt(struct sdio_func *func)
125 */ 127 */
126 isr = sdio_readb(func, SMSSDIO_INT, &ret); 128 isr = sdio_readb(func, SMSSDIO_INT, &ret);
127 if (ret) { 129 if (ret) {
128 dev_err(&smsdev->func->dev, 130 sms_err("Unable to read interrupt register!\n");
129 "Unable to read interrupt register!\n");
130 return; 131 return;
131 } 132 }
132 133
133 if (smsdev->split_cb == NULL) { 134 if (smsdev->split_cb == NULL) {
134 cb = smscore_getbuffer(smsdev->coredev); 135 cb = smscore_getbuffer(smsdev->coredev);
135 if (!cb) { 136 if (!cb) {
136 dev_err(&smsdev->func->dev, 137 sms_err("Unable to allocate data buffer!\n");
137 "Unable to allocate data buffer!\n");
138 return; 138 return;
139 } 139 }
140 140
141 ret = sdio_read_blocks(smsdev->func, cb->p, SMSSDIO_DATA, 1); 141 ret = sdio_memcpy_fromio(smsdev->func,
142 cb->p,
143 SMSSDIO_DATA,
144 SMSSDIO_BLOCK_SIZE);
142 if (ret) { 145 if (ret) {
143 dev_err(&smsdev->func->dev, 146 sms_err("Error %d reading initial block!\n", ret);
144 "Error %d reading initial block!\n", ret);
145 return; 147 return;
146 } 148 }
147 149
@@ -152,7 +154,10 @@ static void smssdio_interrupt(struct sdio_func *func)
152 return; 154 return;
153 } 155 }
154 156
155 size = hdr->msgLength - smsdev->func->cur_blksize; 157 if (hdr->msgLength > smsdev->func->cur_blksize)
158 size = hdr->msgLength - smsdev->func->cur_blksize;
159 else
160 size = 0;
156 } else { 161 } else {
157 cb = smsdev->split_cb; 162 cb = smsdev->split_cb;
158 hdr = cb->p; 163 hdr = cb->p;
@@ -162,23 +167,24 @@ static void smssdio_interrupt(struct sdio_func *func)
162 smsdev->split_cb = NULL; 167 smsdev->split_cb = NULL;
163 } 168 }
164 169
165 if (hdr->msgLength > smsdev->func->cur_blksize) { 170 if (size) {
166 void *buffer; 171 void *buffer;
167 172
168 size = ALIGN(size, 128); 173 buffer = cb->p + (hdr->msgLength - size);
169 buffer = cb->p + hdr->msgLength; 174 size = ALIGN(size, SMSSDIO_BLOCK_SIZE);
170 175
171 BUG_ON(smsdev->func->cur_blksize != 128); 176 BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE);
172 177
173 /* 178 /*
174 * First attempt to transfer all of it in one go... 179 * First attempt to transfer all of it in one go...
175 */ 180 */
176 ret = sdio_read_blocks(smsdev->func, buffer, 181 ret = sdio_memcpy_fromio(smsdev->func,
177 SMSSDIO_DATA, size / 128); 182 buffer,
183 SMSSDIO_DATA,
184 size);
178 if (ret && ret != -EINVAL) { 185 if (ret && ret != -EINVAL) {
179 smscore_putbuffer(smsdev->coredev, cb); 186 smscore_putbuffer(smsdev->coredev, cb);
180 dev_err(&smsdev->func->dev, 187 sms_err("Error %d reading data from card!\n", ret);
181 "Error %d reading data from card!\n", ret);
182 return; 188 return;
183 } 189 }
184 190
@@ -191,12 +197,12 @@ static void smssdio_interrupt(struct sdio_func *func)
191 */ 197 */
192 if (ret == -EINVAL) { 198 if (ret == -EINVAL) {
193 while (size) { 199 while (size) {
194 ret = sdio_read_blocks(smsdev->func, 200 ret = sdio_memcpy_fromio(smsdev->func,
195 buffer, SMSSDIO_DATA, 1); 201 buffer, SMSSDIO_DATA,
202 smsdev->func->cur_blksize);
196 if (ret) { 203 if (ret) {
197 smscore_putbuffer(smsdev->coredev, cb); 204 smscore_putbuffer(smsdev->coredev, cb);
198 dev_err(&smsdev->func->dev, 205 sms_err("Error %d reading "
199 "Error %d reading "
200 "data from card!\n", ret); 206 "data from card!\n", ret);
201 return; 207 return;
202 } 208 }
@@ -269,7 +275,7 @@ static int smssdio_probe(struct sdio_func *func,
269 if (ret) 275 if (ret)
270 goto release; 276 goto release;
271 277
272 ret = sdio_set_block_size(func, 128); 278 ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE);
273 if (ret) 279 if (ret)
274 goto disable; 280 goto disable;
275 281
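The smssdio changes replace the removed sdio_read_blocks()/sdio_write_bytes() helpers with the standard sdio_memcpy_fromio()/sdio_memcpy_toio() calls, name the 128-byte block size, and round partial payloads up to a whole block. A sketch of the remainder read, assuming a message whose msgLength exceeds one block:

    size_t remainder = hdr->msgLength - func->cur_blksize;   /* bytes still owed */
    size_t xfer = ALIGN(remainder, SMSSDIO_BLOCK_SIZE);      /* round up to 128  */

    ret = sdio_memcpy_fromio(func, buffer, SMSSDIO_DATA, xfer);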
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index ed281f565945..1c2e544eda73 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1730,6 +1730,25 @@ static inline void em28xx_set_model(struct em28xx *dev)
1730 EM28XX_I2C_FREQ_100_KHZ; 1730 EM28XX_I2C_FREQ_100_KHZ;
1731} 1731}
1732 1732
1733
1734/* FIXME: Should be replaced by a proper mt9m111 driver */
1735static int em28xx_initialize_mt9m111(struct em28xx *dev)
1736{
1737 int i;
1738 unsigned char regs[][3] = {
1739 { 0x0d, 0x00, 0x01, }, /* reset and use defaults */
1740 { 0x0d, 0x00, 0x00, },
1741 { 0x0a, 0x00, 0x21, },
1742 { 0x21, 0x04, 0x00, }, /* full readout speed, no row/col skipping */
1743 };
1744
1745 for (i = 0; i < ARRAY_SIZE(regs); i++)
1746 i2c_master_send(&dev->i2c_client, &regs[i][0], 3);
1747
1748 return 0;
1749}
1750
1751
1733/* FIXME: Should be replaced by a proper mt9m001 driver */ 1752/* FIXME: Should be replaced by a proper mt9m001 driver */
1734static int em28xx_initialize_mt9m001(struct em28xx *dev) 1753static int em28xx_initialize_mt9m001(struct em28xx *dev)
1735{ 1754{
@@ -1758,7 +1777,7 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
1758 1777
1759/* HINT method: webcam I2C chips 1778/* HINT method: webcam I2C chips
1760 * 1779 *
1761 * This method work for webcams with Micron sensors 1780 * This method works for webcams with Micron sensors
1762 */ 1781 */
1763static int em28xx_hint_sensor(struct em28xx *dev) 1782static int em28xx_hint_sensor(struct em28xx *dev)
1764{ 1783{
@@ -1804,6 +1823,23 @@ static int em28xx_hint_sensor(struct em28xx *dev)
1804 dev->vinctl = 0x00; 1823 dev->vinctl = 0x00;
1805 1824
1806 break; 1825 break;
1826
1827 case 0x143a: /* MT9M111 as found in the ECS G200 */
1828 dev->model = EM2750_BOARD_UNKNOWN;
1829 em28xx_set_model(dev);
1830
1831 sensor_name = "mt9m111";
1832 dev->board.xclk = EM28XX_XCLK_FREQUENCY_48MHZ;
1833 dev->em28xx_sensor = EM28XX_MT9M111;
1834 em28xx_initialize_mt9m111(dev);
1835 dev->sensor_xres = 640;
1836 dev->sensor_yres = 512;
1837
1838 dev->vinmode = 0x0a;
1839 dev->vinctl = 0x00;
1840
1841 break;
1842
1807 case 0x8431: 1843 case 0x8431:
1808 dev->model = EM2750_BOARD_UNKNOWN; 1844 dev->model = EM2750_BOARD_UNKNOWN;
1809 em28xx_set_model(dev); 1845 em28xx_set_model(dev);
@@ -1820,7 +1856,7 @@ static int em28xx_hint_sensor(struct em28xx *dev)
1820 1856
1821 break; 1857 break;
1822 default: 1858 default:
1823 printk("Unknown Micron Sensor 0x%04x\n", be16_to_cpu(version)); 1859 printk("Unknown Micron Sensor 0x%04x\n", version);
1824 return -EINVAL; 1860 return -EINVAL;
1825 } 1861 }
1826 1862
@@ -2346,7 +2382,9 @@ void em28xx_card_setup(struct em28xx *dev)
2346 } 2382 }
2347 2383
2348 em28xx_tuner_setup(dev); 2384 em28xx_tuner_setup(dev);
2349 em28xx_ir_init(dev); 2385
2386 if(!disable_ir)
2387 em28xx_ir_init(dev);
2350} 2388}
2351 2389
2352 2390
diff --git a/drivers/media/video/em28xx/em28xx.h b/drivers/media/video/em28xx/em28xx.h
index 8c2dc38bca9f..a2add61f7d59 100644
--- a/drivers/media/video/em28xx/em28xx.h
+++ b/drivers/media/video/em28xx/em28xx.h
@@ -367,6 +367,7 @@ enum em28xx_sensor {
367 EM28XX_NOSENSOR = 0, 367 EM28XX_NOSENSOR = 0,
368 EM28XX_MT9V011, 368 EM28XX_MT9V011,
369 EM28XX_MT9M001, 369 EM28XX_MT9M001,
370 EM28XX_MT9M111,
370}; 371};
371 372
372enum em28xx_adecoder { 373enum em28xx_adecoder {
diff --git a/drivers/media/video/gspca/Kconfig b/drivers/media/video/gspca/Kconfig
index 34f46f2bc040..e994dcac43ff 100644
--- a/drivers/media/video/gspca/Kconfig
+++ b/drivers/media/video/gspca/Kconfig
@@ -114,7 +114,7 @@ config USB_GSPCA_SN9C20X
114 114
115config USB_GSPCA_SN9C20X_EVDEV 115config USB_GSPCA_SN9C20X_EVDEV
116 bool "Enable evdev support" 116 bool "Enable evdev support"
117 depends on USB_GSPCA_SN9C20X 117 depends on USB_GSPCA_SN9C20X && INPUT
118 ---help--- 118 ---help---
119 Say Y here in order to enable evdev support for sn9c20x webcam button. 119 Say Y here in order to enable evdev support for sn9c20x webcam button.
120 120
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index fc976f42f432..2622a6e63da1 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -695,7 +695,7 @@ static int zr364xx_release(struct file *file)
695 for (i = 0; i < 2; i++) { 695 for (i = 0; i < 2; i++) {
696 err = 696 err =
697 send_control_msg(udev, 1, init[cam->method][i].value, 697 send_control_msg(udev, 1, init[cam->method][i].value,
698 0, init[i][cam->method].bytes, 698 0, init[cam->method][i].bytes,
699 init[cam->method][i].size); 699 init[cam->method][i].size);
700 if (err < 0) { 700 if (err < 0) {
701 dev_err(&udev->dev, "error during release sequence\n"); 701 dev_err(&udev->dev, "error during release sequence\n");
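The zr364xx fix is a one-character indexing bug: the bytes argument was taken from init[i][cam->method] while value and size came from init[cam->method][i], so the wrong payload was sent whenever the two indices differed. One way to make that class of mistake harder (the struct name below is assumed, not the driver's):

    const struct zr364xx_init_msg *msg = &init[cam->method][i];

    err = send_control_msg(udev, 1, msg->value, 0, msg->bytes, msg->size);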
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7ad972229db4..0d9d4bc9c762 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -61,7 +61,7 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
61 buf64 = (uint64_t *)buf; 61 buf64 = (uint64_t *)buf;
62 while (i < len/8) { 62 while (i < len/8) {
63 uint64_t x; 63 uint64_t x;
64 asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base)); 64 asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
65 buf64[i++] = x; 65 buf64[i++] = x;
66 } 66 }
67 i *= 8; 67 i *= 8;
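The orion_nand change tightens the inline assembly for the 64-bit burst read: ldrd writes a register pair but only one register is visible to the compiler as an output, so the earlyclobber '&' keeps GCC from allocating the address operand into that pair, and volatile stops the MMIO read from being reordered or elided. The resulting constraint pattern:

    uint64_t x;

    /* "=&r": the output pair must not overlap the input register;
     * volatile: this is a device read with a side effect */
    asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));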
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index c20416850948..45675889850b 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -235,6 +235,7 @@ enum vortex_chips {
235 CH_3C900B_FL, 235 CH_3C900B_FL,
236 CH_3C905_1, 236 CH_3C905_1,
237 CH_3C905_2, 237 CH_3C905_2,
238 CH_3C905B_TX,
238 CH_3C905B_1, 239 CH_3C905B_1,
239 240
240 CH_3C905B_2, 241 CH_3C905B_2,
@@ -307,6 +308,8 @@ static struct vortex_chip_info {
307 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, 308 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
308 {"3c905 Boomerang 100baseT4", 309 {"3c905 Boomerang 100baseT4",
309 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, }, 310 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
311 {"3C905B-TX Fast Etherlink XL PCI",
312 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
310 {"3c905B Cyclone 100baseTx", 313 {"3c905B Cyclone 100baseTx",
311 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, }, 314 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
312 315
@@ -389,6 +392,7 @@ static struct pci_device_id vortex_pci_tbl[] = {
389 { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL }, 392 { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
390 { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 }, 393 { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
391 { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 }, 394 { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
395 { 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
392 { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 }, 396 { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
393 397
394 { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 }, 398 { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 50efde11ea6c..d0dbbf39349a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -515,7 +515,7 @@ rx_status_loop:
515 dma_addr_t mapping; 515 dma_addr_t mapping;
516 struct sk_buff *skb, *new_skb; 516 struct sk_buff *skb, *new_skb;
517 struct cp_desc *desc; 517 struct cp_desc *desc;
518 unsigned buflen; 518 const unsigned buflen = cp->rx_buf_sz;
519 519
520 skb = cp->rx_skb[rx_tail]; 520 skb = cp->rx_skb[rx_tail];
521 BUG_ON(!skb); 521 BUG_ON(!skb);
@@ -549,8 +549,7 @@ rx_status_loop:
549 pr_debug("%s: rx slot %d status 0x%x len %d\n", 549 pr_debug("%s: rx slot %d status 0x%x len %d\n",
550 dev->name, rx_tail, status, len); 550 dev->name, rx_tail, status, len);
551 551
552 buflen = cp->rx_buf_sz + NET_IP_ALIGN; 552 new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN);
553 new_skb = netdev_alloc_skb(dev, buflen);
554 if (!new_skb) { 553 if (!new_skb) {
555 dev->stats.rx_dropped++; 554 dev->stats.rx_dropped++;
556 goto rx_next; 555 goto rx_next;
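In 8139cp the receive buffer length used for the descriptor is now always cp->rx_buf_sz; only the skb allocation adds NET_IP_ALIGN of slack so the IP header can be aligned. A sketch of the usual pattern this follows (whether this driver reserves the headroom in the same function is not shown in the hunk):

    new_skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
    if (new_skb) {
            skb_reserve(new_skb, NET_IP_ALIGN);   /* align the IP header */
            /* mapping and descriptor length keep using cp->rx_buf_sz */
    }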
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5f6509a5f640..5ce7cbabd7a7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@ config KS8842
1727 tristate "Micrel KSZ8842" 1727 tristate "Micrel KSZ8842"
1728 depends on HAS_IOMEM 1728 depends on HAS_IOMEM
1729 help 1729 help
1730 This platform driver is for Micrel KSZ8842 chip. 1730 This platform driver is for Micrel KSZ8842 / KS8842
1731 2-port ethernet switch chip (managed, VLAN, QoS).
1731 1732
1732config KS8851 1733config KS8851
1733 tristate "Micrel KS8851 SPI" 1734 tristate "Micrel KS8851 SPI"
1734 depends on SPI 1735 depends on SPI
1735 select MII 1736 select MII
1737 select CRC32
1736 help 1738 help
1737 SPI driver for Micrel KS8851 SPI attached network chip. 1739 SPI driver for Micrel KS8851 SPI attached network chip.
1738 1740
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a34..ddd231cb54b7 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1080,7 +1080,7 @@ static struct platform_driver w90p910_ether_driver = {
1080 .probe = w90p910_ether_probe, 1080 .probe = w90p910_ether_probe,
1081 .remove = __devexit_p(w90p910_ether_remove), 1081 .remove = __devexit_p(w90p910_ether_remove),
1082 .driver = { 1082 .driver = {
1083 .name = "w90p910-emc", 1083 .name = "nuc900-emc",
1084 .owner = THIS_MODULE, 1084 .owner = THIS_MODULE,
1085 }, 1085 },
1086}; 1086};
@@ -1101,5 +1101,5 @@ module_exit(w90p910_ether_exit);
1101MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>"); 1101MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
1102MODULE_DESCRIPTION("w90p910 MAC driver!"); 1102MODULE_DESCRIPTION("w90p910 MAC driver!");
1103MODULE_LICENSE("GPL"); 1103MODULE_LICENSE("GPL");
1104MODULE_ALIAS("platform:w90p910-emc"); 1104MODULE_ALIAS("platform:nuc900-emc");
1105 1105
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 607007d75b6f..00d11b480af3 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -232,11 +232,11 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
232{ 232{
233 struct atl1c_adapter *adapter = netdev_priv(netdev); 233 struct atl1c_adapter *adapter = netdev_priv(netdev);
234 234
235 strncpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); 235 strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
236 strncpy(drvinfo->version, atl1c_driver_version, 236 strlcpy(drvinfo->version, atl1c_driver_version,
237 sizeof(drvinfo->version)); 237 sizeof(drvinfo->version));
238 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); 238 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
239 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 239 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
240 sizeof(drvinfo->bus_info)); 240 sizeof(drvinfo->bus_info));
241 drvinfo->n_stats = 0; 241 drvinfo->n_stats = 0;
242 drvinfo->testinfo_len = 0; 242 drvinfo->testinfo_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 94d7325caf4f..8bca12f71390 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3378,11 +3378,11 @@ static void atl1_get_drvinfo(struct net_device *netdev,
3378{ 3378{
3379 struct atl1_adapter *adapter = netdev_priv(netdev); 3379 struct atl1_adapter *adapter = netdev_priv(netdev);
3380 3380
3381 strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver)); 3381 strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
3382 strncpy(drvinfo->version, ATLX_DRIVER_VERSION, 3382 strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
3383 sizeof(drvinfo->version)); 3383 sizeof(drvinfo->version));
3384 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); 3384 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
3385 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 3385 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
3386 sizeof(drvinfo->bus_info)); 3386 sizeof(drvinfo->bus_info));
3387 drvinfo->eedump_len = ATL1_EEDUMP_LEN; 3387 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
3388} 3388}
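Both Atheros ethtool get_drvinfo paths switch from strncpy() to strlcpy(): strncpy() leaves the destination unterminated when the source is at least as long as the buffer, while strlcpy() always terminates (at the cost of truncation). A small illustration with an assumed 8-byte buffer:

    char buf[8];

    strncpy(buf, "0000:00:1f.0", sizeof(buf));  /* 8 bytes copied, no '\0'    */
    strlcpy(buf, "0000:00:1f.0", sizeof(buf));  /* buf is "0000:00" plus '\0' */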
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 36d4d377ec2f..bafca672ea7d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
952 int rc = NETDEV_TX_OK; 952 int rc = NETDEV_TX_OK;
953 dma_addr_t mapping; 953 dma_addr_t mapping;
954 u32 len, entry, ctrl; 954 u32 len, entry, ctrl;
955 unsigned long flags;
955 956
956 len = skb->len; 957 len = skb->len;
957 spin_lock_irq(&bp->lock); 958 spin_lock_irqsave(&bp->lock, flags);
958 959
959 /* This is a hard error, log it. */ 960 /* This is a hard error, log it. */
960 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { 961 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1027 dev->trans_start = jiffies; 1028 dev->trans_start = jiffies;
1028 1029
1029out_unlock: 1030out_unlock:
1030 spin_unlock_irq(&bp->lock); 1031 spin_unlock_irqrestore(&bp->lock, flags);
1031 1032
1032 return rc; 1033 return rc;
1033 1034
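b44_start_xmit() can be entered with local interrupts already disabled (netpoll is one such path), so it must not re-enable them unconditionally on unlock; the _irqsave/_irqrestore pair preserves whatever IRQ state the caller had:

    unsigned long flags;

    spin_lock_irqsave(&bp->lock, flags);
    /* ... queue the skb on the TX ring ... */
    spin_unlock_irqrestore(&bp->lock, flags);   /* restores, does not blindly enable */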
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index b70cc99962fc..06b901152d44 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -399,9 +399,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
399 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 399 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
400 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 400 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
401 401
402 mutex_lock(&bp->cnic_lock);
402 cp->drv_state = 0; 403 cp->drv_state = 0;
403 bnapi->cnic_present = 0; 404 bnapi->cnic_present = 0;
404 rcu_assign_pointer(bp->cnic_ops, NULL); 405 rcu_assign_pointer(bp->cnic_ops, NULL);
406 mutex_unlock(&bp->cnic_lock);
405 synchronize_rcu(); 407 synchronize_rcu();
406 return 0; 408 return 0;
407} 409}
@@ -429,13 +431,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
429 struct cnic_ops *c_ops; 431 struct cnic_ops *c_ops;
430 struct cnic_ctl_info info; 432 struct cnic_ctl_info info;
431 433
432 rcu_read_lock(); 434 mutex_lock(&bp->cnic_lock);
433 c_ops = rcu_dereference(bp->cnic_ops); 435 c_ops = bp->cnic_ops;
434 if (c_ops) { 436 if (c_ops) {
435 info.cmd = CNIC_CTL_STOP_CMD; 437 info.cmd = CNIC_CTL_STOP_CMD;
436 c_ops->cnic_ctl(bp->cnic_data, &info); 438 c_ops->cnic_ctl(bp->cnic_data, &info);
437 } 439 }
438 rcu_read_unlock(); 440 mutex_unlock(&bp->cnic_lock);
439} 441}
440 442
441static void 443static void
@@ -444,8 +446,8 @@ bnx2_cnic_start(struct bnx2 *bp)
444 struct cnic_ops *c_ops; 446 struct cnic_ops *c_ops;
445 struct cnic_ctl_info info; 447 struct cnic_ctl_info info;
446 448
447 rcu_read_lock(); 449 mutex_lock(&bp->cnic_lock);
448 c_ops = rcu_dereference(bp->cnic_ops); 450 c_ops = bp->cnic_ops;
449 if (c_ops) { 451 if (c_ops) {
450 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { 452 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
451 struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; 453 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -455,7 +457,7 @@ bnx2_cnic_start(struct bnx2 *bp)
455 info.cmd = CNIC_CTL_START_CMD; 457 info.cmd = CNIC_CTL_START_CMD;
456 c_ops->cnic_ctl(bp->cnic_data, &info); 458 c_ops->cnic_ctl(bp->cnic_data, &info);
457 } 459 }
458 rcu_read_unlock(); 460 mutex_unlock(&bp->cnic_lock);
459} 461}
460 462
461#else 463#else
@@ -7663,6 +7665,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7663 7665
7664 spin_lock_init(&bp->phy_lock); 7666 spin_lock_init(&bp->phy_lock);
7665 spin_lock_init(&bp->indirect_lock); 7667 spin_lock_init(&bp->indirect_lock);
7668#ifdef BCM_CNIC
7669 mutex_init(&bp->cnic_lock);
7670#endif
7666 INIT_WORK(&bp->reset_task, bnx2_reset_task); 7671 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7667 7672
7668 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 7673 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
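The bnx2/cnic interaction moves from RCU read-side protection to the new bp->cnic_lock mutex when calling into the cnic driver, since the cnic_ctl()/cnic_start()/cnic_stop() callbacks may sleep and therefore cannot run inside rcu_read_lock(). The resulting access pattern, sketched:

    mutex_lock(&bp->cnic_lock);
    c_ops = bp->cnic_ops;
    if (c_ops)
            c_ops->cnic_ctl(bp->cnic_data, &info);   /* may sleep */
    mutex_unlock(&bp->cnic_lock);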
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f1edfaa9e56a..a4f12fd0ecd2 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6902,6 +6902,7 @@ struct bnx2 {
6902 u32 idle_chk_status_idx; 6902 u32 idle_chk_status_idx;
6903 6903
6904#ifdef BCM_CNIC 6904#ifdef BCM_CNIC
6905 struct mutex cnic_lock;
6905 struct cnic_eth_dev cnic_eth_dev; 6906 struct cnic_eth_dev cnic_eth_dev;
6906#endif 6907#endif
6907 6908
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 9e4283aff828..e1a4f8214239 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -611,11 +611,18 @@ nla_put_failure:
611 return -EMSGSIZE; 611 return -EMSGSIZE;
612} 612}
613 613
614static int can_newlink(struct net_device *dev,
615 struct nlattr *tb[], struct nlattr *data[])
616{
617 return -EOPNOTSUPP;
618}
619
614static struct rtnl_link_ops can_link_ops __read_mostly = { 620static struct rtnl_link_ops can_link_ops __read_mostly = {
615 .kind = "can", 621 .kind = "can",
616 .maxtype = IFLA_CAN_MAX, 622 .maxtype = IFLA_CAN_MAX,
617 .policy = can_policy, 623 .policy = can_policy,
618 .setup = can_setup, 624 .setup = can_setup,
625 .newlink = can_newlink,
619 .changelink = can_changelink, 626 .changelink = can_changelink,
620 .fill_info = can_fill_info, 627 .fill_info = can_fill_info,
621 .fill_xstats = can_fill_xstats, 628 .fill_xstats = can_fill_xstats,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4869d77cbe91..74c342959b7b 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
138 return NULL; 138 return NULL;
139} 139}
140 140
141static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
142{
143 atomic_inc(&ulp_ops->ref_count);
144}
145
146static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
147{
148 atomic_dec(&ulp_ops->ref_count);
149}
150
141static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val) 151static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
142{ 152{
143 struct cnic_local *cp = dev->cnic_priv; 153 struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
358 } 368 }
359 read_unlock(&cnic_dev_lock); 369 read_unlock(&cnic_dev_lock);
360 370
371 atomic_set(&ulp_ops->ref_count, 0);
361 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops); 372 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
362 mutex_unlock(&cnic_lock); 373 mutex_unlock(&cnic_lock);
363 374
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
379int cnic_unregister_driver(int ulp_type) 390int cnic_unregister_driver(int ulp_type)
380{ 391{
381 struct cnic_dev *dev; 392 struct cnic_dev *dev;
393 struct cnic_ulp_ops *ulp_ops;
394 int i = 0;
382 395
383 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 396 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
384 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", 397 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
386 return -EINVAL; 399 return -EINVAL;
387 } 400 }
388 mutex_lock(&cnic_lock); 401 mutex_lock(&cnic_lock);
389 if (!cnic_ulp_tbl[ulp_type]) { 402 ulp_ops = cnic_ulp_tbl[ulp_type];
403 if (!ulp_ops) {
390 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not " 404 printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
391 "been registered\n", ulp_type); 405 "been registered\n", ulp_type);
392 goto out_unlock; 406 goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
411 425
412 mutex_unlock(&cnic_lock); 426 mutex_unlock(&cnic_lock);
413 synchronize_rcu(); 427 synchronize_rcu();
428 while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
429 msleep(100);
430 i++;
431 }
432
433 if (atomic_read(&ulp_ops->ref_count) != 0)
434 printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
435 " to zero.\n", dev->netdev->name);
414 return 0; 436 return 0;
415 437
416out_unlock: 438out_unlock:
@@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
466static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type) 488static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
467{ 489{
468 struct cnic_local *cp = dev->cnic_priv; 490 struct cnic_local *cp = dev->cnic_priv;
491 int i = 0;
469 492
470 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 493 if (ulp_type >= MAX_CNIC_ULP_TYPE) {
471 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", 494 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
486 509
487 synchronize_rcu(); 510 synchronize_rcu();
488 511
512 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
513 i < 20) {
514 msleep(100);
515 i++;
516 }
517 if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
518 printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
519 " to complete.\n", dev->netdev->name);
520
489 return 0; 521 return 0;
490} 522}
491EXPORT_SYMBOL(cnic_unregister_driver); 523EXPORT_SYMBOL(cnic_unregister_driver);
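On the cnic side, unregistering a ULP driver or device now waits (bounded) for in-flight up-calls: a reference count (ulp_get/ulp_put) or the ULP_F_CALL_PENDING bit is polled for up to 20 x 100 ms before giving up with a warning. The idiom, reduced to its core:

    int i = 0;

    while (atomic_read(&ulp_ops->ref_count) != 0 && i < 20) {
            msleep(100);            /* give pending up-calls time to finish */
            i++;
    }
    if (atomic_read(&ulp_ops->ref_count) != 0)
            printk(KERN_WARNING "cnic: ref count did not drop to zero\n");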
@@ -1076,18 +1108,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
1076 if (cp->cnic_uinfo) 1108 if (cp->cnic_uinfo)
1077 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 1109 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
1078 1110
1079 rcu_read_lock();
1080 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 1111 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1081 struct cnic_ulp_ops *ulp_ops; 1112 struct cnic_ulp_ops *ulp_ops;
1082 1113
1083 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 1114 mutex_lock(&cnic_lock);
1084 if (!ulp_ops) 1115 ulp_ops = cp->ulp_ops[if_type];
1116 if (!ulp_ops) {
1117 mutex_unlock(&cnic_lock);
1085 continue; 1118 continue;
1119 }
1120 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
1121 mutex_unlock(&cnic_lock);
1086 1122
1087 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 1123 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1088 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 1124 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
1125
1126 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
1089 } 1127 }
1090 rcu_read_unlock();
1091} 1128}
1092 1129
1093static void cnic_ulp_start(struct cnic_dev *dev) 1130static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1095,18 +1132,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
1095 struct cnic_local *cp = dev->cnic_priv; 1132 struct cnic_local *cp = dev->cnic_priv;
1096 int if_type; 1133 int if_type;
1097 1134
1098 rcu_read_lock();
1099 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 1135 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
1100 struct cnic_ulp_ops *ulp_ops; 1136 struct cnic_ulp_ops *ulp_ops;
1101 1137
1102 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); 1138 mutex_lock(&cnic_lock);
1103 if (!ulp_ops || !ulp_ops->cnic_start) 1139 ulp_ops = cp->ulp_ops[if_type];
1140 if (!ulp_ops || !ulp_ops->cnic_start) {
1141 mutex_unlock(&cnic_lock);
1104 continue; 1142 continue;
1143 }
1144 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
1145 mutex_unlock(&cnic_lock);
1105 1146
1106 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 1147 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
1107 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 1148 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
1149
1150 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
1108 } 1151 }
1109 rcu_read_unlock();
1110} 1152}
1111 1153
1112static int cnic_ctl(void *data, struct cnic_ctl_info *info) 1154static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1116,22 +1158,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1116 switch (info->cmd) { 1158 switch (info->cmd) {
1117 case CNIC_CTL_STOP_CMD: 1159 case CNIC_CTL_STOP_CMD:
1118 cnic_hold(dev); 1160 cnic_hold(dev);
1119 mutex_lock(&cnic_lock);
1120 1161
1121 cnic_ulp_stop(dev); 1162 cnic_ulp_stop(dev);
1122 cnic_stop_hw(dev); 1163 cnic_stop_hw(dev);
1123 1164
1124 mutex_unlock(&cnic_lock);
1125 cnic_put(dev); 1165 cnic_put(dev);
1126 break; 1166 break;
1127 case CNIC_CTL_START_CMD: 1167 case CNIC_CTL_START_CMD:
1128 cnic_hold(dev); 1168 cnic_hold(dev);
1129 mutex_lock(&cnic_lock);
1130 1169
1131 if (!cnic_start_hw(dev)) 1170 if (!cnic_start_hw(dev))
1132 cnic_ulp_start(dev); 1171 cnic_ulp_start(dev);
1133 1172
1134 mutex_unlock(&cnic_lock);
1135 cnic_put(dev); 1173 cnic_put(dev);
1136 break; 1174 break;
1137 default: 1175 default:
@@ -1145,19 +1183,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
1145 int i; 1183 int i;
1146 struct cnic_local *cp = dev->cnic_priv; 1184 struct cnic_local *cp = dev->cnic_priv;
1147 1185
1148 rcu_read_lock();
1149 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 1186 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1150 struct cnic_ulp_ops *ulp_ops; 1187 struct cnic_ulp_ops *ulp_ops;
1151 1188
1152 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); 1189 mutex_lock(&cnic_lock);
1153 if (!ulp_ops || !ulp_ops->cnic_init) 1190 ulp_ops = cnic_ulp_tbl[i];
1191 if (!ulp_ops || !ulp_ops->cnic_init) {
1192 mutex_unlock(&cnic_lock);
1154 continue; 1193 continue;
1194 }
1195 ulp_get(ulp_ops);
1196 mutex_unlock(&cnic_lock);
1155 1197
1156 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 1198 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1157 ulp_ops->cnic_init(dev); 1199 ulp_ops->cnic_init(dev);
1158 1200
1201 ulp_put(ulp_ops);
1159 } 1202 }
1160 rcu_read_unlock();
1161} 1203}
1162 1204
1163static void cnic_ulp_exit(struct cnic_dev *dev) 1205static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1165,19 +1207,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
1165 int i; 1207 int i;
1166 struct cnic_local *cp = dev->cnic_priv; 1208 struct cnic_local *cp = dev->cnic_priv;
1167 1209
1168 rcu_read_lock();
1169 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 1210 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
1170 struct cnic_ulp_ops *ulp_ops; 1211 struct cnic_ulp_ops *ulp_ops;
1171 1212
1172 ulp_ops = rcu_dereference(cnic_ulp_tbl[i]); 1213 mutex_lock(&cnic_lock);
1173 if (!ulp_ops || !ulp_ops->cnic_exit) 1214 ulp_ops = cnic_ulp_tbl[i];
1215 if (!ulp_ops || !ulp_ops->cnic_exit) {
1216 mutex_unlock(&cnic_lock);
1174 continue; 1217 continue;
1218 }
1219 ulp_get(ulp_ops);
1220 mutex_unlock(&cnic_lock);
1175 1221
1176 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 1222 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
1177 ulp_ops->cnic_exit(dev); 1223 ulp_ops->cnic_exit(dev);
1178 1224
1225 ulp_put(ulp_ops);
1179 } 1226 }
1180 rcu_read_unlock();
1181} 1227}
1182 1228
1183static int cnic_cm_offload_pg(struct cnic_sock *csk) 1229static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2393,21 +2439,45 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2393 return 0; 2439 return 0;
2394} 2440}
2395 2441
2396static int cnic_start_hw(struct cnic_dev *dev) 2442static int cnic_register_netdev(struct cnic_dev *dev)
2397{ 2443{
2398 struct cnic_local *cp = dev->cnic_priv; 2444 struct cnic_local *cp = dev->cnic_priv;
2399 struct cnic_eth_dev *ethdev = cp->ethdev; 2445 struct cnic_eth_dev *ethdev = cp->ethdev;
2400 int err; 2446 int err;
2401 2447
2402 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2448 if (!ethdev)
2403 return -EALREADY; 2449 return -ENODEV;
2450
2451 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
2452 return 0;
2404 2453
2405 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 2454 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
2406 if (err) { 2455 if (err)
2407 printk(KERN_ERR PFX "%s: register_cnic failed\n", 2456 printk(KERN_ERR PFX "%s: register_cnic failed\n",
2408 dev->netdev->name); 2457 dev->netdev->name);
2409 goto err2; 2458
2410 } 2459 return err;
2460}
2461
2462static void cnic_unregister_netdev(struct cnic_dev *dev)
2463{
2464 struct cnic_local *cp = dev->cnic_priv;
2465 struct cnic_eth_dev *ethdev = cp->ethdev;
2466
2467 if (!ethdev)
2468 return;
2469
2470 ethdev->drv_unregister_cnic(dev->netdev);
2471}
2472
2473static int cnic_start_hw(struct cnic_dev *dev)
2474{
2475 struct cnic_local *cp = dev->cnic_priv;
2476 struct cnic_eth_dev *ethdev = cp->ethdev;
2477 int err;
2478
2479 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
2480 return -EALREADY;
2411 2481
2412 dev->regview = ethdev->io_base; 2482 dev->regview = ethdev->io_base;
2413 cp->chip_id = ethdev->chip_id; 2483 cp->chip_id = ethdev->chip_id;
@@ -2438,18 +2508,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
2438 return 0; 2508 return 0;
2439 2509
2440err1: 2510err1:
2441 ethdev->drv_unregister_cnic(dev->netdev);
2442 cp->free_resc(dev); 2511 cp->free_resc(dev);
2443 pci_dev_put(dev->pcidev); 2512 pci_dev_put(dev->pcidev);
2444err2:
2445 return err; 2513 return err;
2446} 2514}
2447 2515
2448static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 2516static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2449{ 2517{
2450 struct cnic_local *cp = dev->cnic_priv;
2451 struct cnic_eth_dev *ethdev = cp->ethdev;
2452
2453 cnic_disable_bnx2_int_sync(dev); 2518 cnic_disable_bnx2_int_sync(dev);
2454 2519
2455 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 2520 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2461,8 +2526,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2461 cnic_setup_5709_context(dev, 0); 2526 cnic_setup_5709_context(dev, 0);
2462 cnic_free_irq(dev); 2527 cnic_free_irq(dev);
2463 2528
2464 ethdev->drv_unregister_cnic(dev->netdev);
2465
2466 cnic_free_resc(dev); 2529 cnic_free_resc(dev);
2467} 2530}
2468 2531
@@ -2543,7 +2606,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
2543 probe = symbol_get(bnx2_cnic_probe); 2606 probe = symbol_get(bnx2_cnic_probe);
2544 if (probe) { 2607 if (probe) {
2545 ethdev = (*probe)(dev); 2608 ethdev = (*probe)(dev);
2546 symbol_put_addr(probe); 2609 symbol_put(bnx2_cnic_probe);
2547 } 2610 }
2548 if (!ethdev) 2611 if (!ethdev)
2549 return NULL; 2612 return NULL;
@@ -2646,10 +2709,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2646 else if (event == NETDEV_UNREGISTER) 2709 else if (event == NETDEV_UNREGISTER)
2647 cnic_ulp_exit(dev); 2710 cnic_ulp_exit(dev);
2648 else if (event == NETDEV_UP) { 2711 else if (event == NETDEV_UP) {
2649 mutex_lock(&cnic_lock); 2712 if (cnic_register_netdev(dev) != 0) {
2713 cnic_put(dev);
2714 goto done;
2715 }
2650 if (!cnic_start_hw(dev)) 2716 if (!cnic_start_hw(dev))
2651 cnic_ulp_start(dev); 2717 cnic_ulp_start(dev);
2652 mutex_unlock(&cnic_lock);
2653 } 2718 }
2654 2719
2655 rcu_read_lock(); 2720 rcu_read_lock();
@@ -2668,10 +2733,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
2668 rcu_read_unlock(); 2733 rcu_read_unlock();
2669 2734
2670 if (event == NETDEV_GOING_DOWN) { 2735 if (event == NETDEV_GOING_DOWN) {
2671 mutex_lock(&cnic_lock);
2672 cnic_ulp_stop(dev); 2736 cnic_ulp_stop(dev);
2673 cnic_stop_hw(dev); 2737 cnic_stop_hw(dev);
2674 mutex_unlock(&cnic_lock); 2738 cnic_unregister_netdev(dev);
2675 } else if (event == NETDEV_UNREGISTER) { 2739 } else if (event == NETDEV_UNREGISTER) {
2676 write_lock(&cnic_dev_lock); 2740 write_lock(&cnic_dev_lock);
2677 list_del_init(&dev->list); 2741 list_del_init(&dev->list);
@@ -2703,6 +2767,7 @@ static void cnic_release(void)
2703 } 2767 }
2704 2768
2705 cnic_ulp_exit(dev); 2769 cnic_ulp_exit(dev);
2770 cnic_unregister_netdev(dev);
2706 list_del_init(&dev->list); 2771 list_del_init(&dev->list);
2707 cnic_free_dev(dev); 2772 cnic_free_dev(dev);
2708 } 2773 }
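The cnic.c hunks above replace the rcu_read_lock()-protected upcall loops with a reference/flag scheme: cnic_ulp_init()/cnic_ulp_exit() take a reference around each upcall (ulp_get/ulp_put), cnic_ulp_start()/cnic_ulp_stop() set ULP_F_CALL_PENDING, and the unregister paths poll for those markers to drain before returning, since the upcalls may sleep and can no longer run under RCU or cnic_lock. A minimal sketch of the bounded-drain idiom used in cnic_unregister_driver() above (illustrative only, names invented, not the driver's code):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Poll until every in-flight upcall has dropped its reference, giving up
 * after max_tries * 100 ms (cnic uses 20 tries, i.e. about two seconds). */
static int drain_ulp_refs(atomic_t *ref_count, int max_tries)
{
	int i = 0;

	while (atomic_read(ref_count) != 0 && i < max_tries) {
		msleep(100);		/* sleeps, so no spinlocks may be held here */
		i++;
	}
	if (atomic_read(ref_count) != 0) {
		printk(KERN_WARNING "upcall reference count never reached zero\n");
		return -EBUSY;
	}
	return 0;
}

The warning mirrors the one the patch prints; the real unregister path still returns success afterwards, it merely loses the guarantee that no upcall is in flight.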
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 5192d4a9df5a..a94b302bb464 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -176,6 +176,7 @@ struct cnic_local {
176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; 176 unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
177#define ULP_F_INIT 0 177#define ULP_F_INIT 0
178#define ULP_F_START 1 178#define ULP_F_START 1
179#define ULP_F_CALL_PENDING 2
179 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 180 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
180 181
181 /* protected by ulp_lock */ 182 /* protected by ulp_lock */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d1bce27ee99e..a49235739eef 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, 290 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
291 char *data, u16 data_size); 291 char *data, u16 data_size);
292 struct module *owner; 292 struct module *owner;
293 atomic_t ref_count;
293}; 294};
294 295
295extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); 296extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 41b648a67fec..3a6735dc9f6a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1899,7 +1899,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1899 nic->ru_running = RU_SUSPENDED; 1899 nic->ru_running = RU_SUSPENDED;
1900 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr, 1900 pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
1901 sizeof(struct rfd), 1901 sizeof(struct rfd),
1902 PCI_DMA_BIDIRECTIONAL); 1902 PCI_DMA_FROMDEVICE);
1903 return -ENODATA; 1903 return -ENODATA;
1904 } 1904 }
1905 1905
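The one-line e100 change makes the descriptor re-sync direction match the direction the RFD buffer was mapped with; under the PCI DMA API the sync direction must be the same one passed at map time. A rough sketch of the matched map/sync pairing (illustrative, names invented):

#include <linux/pci.h>

static void rfd_sync_sketch(struct pci_dev *pdev, void *rfd, size_t len)
{
	dma_addr_t dma = pci_map_single(pdev, rfd, len, PCI_DMA_FROMDEVICE);

	/* CPU inspects what the device wrote into the descriptor ... */
	pci_dma_sync_single_for_cpu(pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* ... then hands it back to the device, using the same direction. */
	pci_dma_sync_single_for_device(pdev, dma, len, PCI_DMA_FROMDEVICE);

	pci_unmap_single(pdev, dma, len, PCI_DMA_FROMDEVICE);
}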
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d56c7473144a..99df2abf82a9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,10 +338,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
338{ 338{
339 struct e1000_nvm_info *nvm = &hw->nvm; 339 struct e1000_nvm_info *nvm = &hw->nvm;
340 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 340 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
341 union ich8_hws_flash_status hsfsts; 341 u32 gfpreg, sector_base_addr, sector_end_addr;
342 u32 gfpreg;
343 u32 sector_base_addr;
344 u32 sector_end_addr;
345 u16 i; 342 u16 i;
346 343
347 /* Can't read flash registers if the register set isn't mapped. */ 344 /* Can't read flash registers if the register set isn't mapped. */
@@ -375,20 +372,6 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
375 /* Adjust to word count */ 372 /* Adjust to word count */
376 nvm->flash_bank_size /= sizeof(u16); 373 nvm->flash_bank_size /= sizeof(u16);
377 374
378 /*
379 * Make sure the flash bank size does not overwrite the 4k
380 * sector ranges. We may have 64k allotted to us but we only care
381 * about the first 2 4k sectors. Therefore, if we have anything less
382 * than 64k set in the HSFSTS register, we will reduce the bank size
383 * down to 4k and let the rest remain unused. If berasesz == 3, then
384 * we are working in 64k mode. Otherwise we are not.
385 */
386 if (nvm->flash_bank_size > E1000_ICH8_SHADOW_RAM_WORDS) {
387 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
388 if (hsfsts.hsf_status.berasesz != 3)
389 nvm->flash_bank_size = E1000_ICH8_SHADOW_RAM_WORDS;
390 }
391
392 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; 375 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
393 376
394 /* Clear shadow ram */ 377 /* Clear shadow ram */
@@ -594,8 +577,8 @@ static DEFINE_MUTEX(nvm_mutex);
594 **/ 577 **/
595static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) 578static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
596{ 579{
597 u32 extcnf_ctrl; 580 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
598 u32 timeout = PHY_CFG_TIMEOUT; 581 s32 ret_val = 0;
599 582
600 might_sleep(); 583 might_sleep();
601 584
@@ -603,28 +586,46 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
603 586
604 while (timeout) { 587 while (timeout) {
605 extcnf_ctrl = er32(EXTCNF_CTRL); 588 extcnf_ctrl = er32(EXTCNF_CTRL);
589 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
590 break;
606 591
607 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) { 592 mdelay(1);
608 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; 593 timeout--;
609 ew32(EXTCNF_CTRL, extcnf_ctrl); 594 }
595
596 if (!timeout) {
597 hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n");
598 ret_val = -E1000_ERR_CONFIG;
599 goto out;
600 }
601
602 timeout = PHY_CFG_TIMEOUT * 2;
603
604 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
605 ew32(EXTCNF_CTRL, extcnf_ctrl);
606
607 while (timeout) {
608 extcnf_ctrl = er32(EXTCNF_CTRL);
609 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
610 break;
610 611
611 extcnf_ctrl = er32(EXTCNF_CTRL);
612 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
613 break;
614 }
615 mdelay(1); 612 mdelay(1);
616 timeout--; 613 timeout--;
617 } 614 }
618 615
619 if (!timeout) { 616 if (!timeout) {
620 hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); 617 hw_dbg(hw, "Failed to acquire the semaphore.\n");
621 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 618 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
622 ew32(EXTCNF_CTRL, extcnf_ctrl); 619 ew32(EXTCNF_CTRL, extcnf_ctrl);
623 mutex_unlock(&nvm_mutex); 620 ret_val = -E1000_ERR_CONFIG;
624 return -E1000_ERR_CONFIG; 621 goto out;
625 } 622 }
626 623
627 return 0; 624out:
625 if (ret_val)
626 mutex_unlock(&nvm_mutex);
627
628 return ret_val;
628} 629}
629 630
630/** 631/**
@@ -1306,7 +1307,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1306 struct e1000_nvm_info *nvm = &hw->nvm; 1307 struct e1000_nvm_info *nvm = &hw->nvm;
1307 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 1308 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1308 u32 act_offset; 1309 u32 act_offset;
1309 s32 ret_val; 1310 s32 ret_val = 0;
1310 u32 bank = 0; 1311 u32 bank = 0;
1311 u16 i, word; 1312 u16 i, word;
1312 1313
@@ -1321,12 +1322,15 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1321 goto out; 1322 goto out;
1322 1323
1323 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1324 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1324 if (ret_val) 1325 if (ret_val) {
1325 goto release; 1326 hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
1327 bank = 0;
1328 }
1326 1329
1327 act_offset = (bank) ? nvm->flash_bank_size : 0; 1330 act_offset = (bank) ? nvm->flash_bank_size : 0;
1328 act_offset += offset; 1331 act_offset += offset;
1329 1332
1333 ret_val = 0;
1330 for (i = 0; i < words; i++) { 1334 for (i = 0; i < words; i++) {
1331 if ((dev_spec->shadow_ram) && 1335 if ((dev_spec->shadow_ram) &&
1332 (dev_spec->shadow_ram[offset+i].modified)) { 1336 (dev_spec->shadow_ram[offset+i].modified)) {
@@ -1341,7 +1345,6 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1341 } 1345 }
1342 } 1346 }
1343 1347
1344release:
1345 e1000_release_swflag_ich8lan(hw); 1348 e1000_release_swflag_ich8lan(hw);
1346 1349
1347out: 1350out:
@@ -1592,7 +1595,6 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1592{ 1595{
1593 struct e1000_nvm_info *nvm = &hw->nvm; 1596 struct e1000_nvm_info *nvm = &hw->nvm;
1594 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 1597 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1595 s32 ret_val;
1596 u16 i; 1598 u16 i;
1597 1599
1598 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1600 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -1601,17 +1603,11 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1601 return -E1000_ERR_NVM; 1603 return -E1000_ERR_NVM;
1602 } 1604 }
1603 1605
1604 ret_val = e1000_acquire_swflag_ich8lan(hw);
1605 if (ret_val)
1606 return ret_val;
1607
1608 for (i = 0; i < words; i++) { 1606 for (i = 0; i < words; i++) {
1609 dev_spec->shadow_ram[offset+i].modified = 1; 1607 dev_spec->shadow_ram[offset+i].modified = 1;
1610 dev_spec->shadow_ram[offset+i].value = data[i]; 1608 dev_spec->shadow_ram[offset+i].value = data[i];
1611 } 1609 }
1612 1610
1613 e1000_release_swflag_ich8lan(hw);
1614
1615 return 0; 1611 return 0;
1616} 1612}
1617 1613
@@ -1652,8 +1648,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1652 */ 1648 */
1653 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1649 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1654 if (ret_val) { 1650 if (ret_val) {
1655 e1000_release_swflag_ich8lan(hw); 1651 hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n");
1656 goto out; 1652 bank = 0;
1657 } 1653 }
1658 1654
1659 if (bank == 0) { 1655 if (bank == 0) {
@@ -2039,12 +2035,8 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2039 iteration = 1; 2035 iteration = 1;
2040 break; 2036 break;
2041 case 2: 2037 case 2:
2042 if (hw->mac.type == e1000_ich9lan) { 2038 sector_size = ICH_FLASH_SEG_SIZE_8K;
2043 sector_size = ICH_FLASH_SEG_SIZE_8K; 2039 iteration = 1;
2044 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
2045 } else {
2046 return -E1000_ERR_NVM;
2047 }
2048 break; 2040 break;
2049 case 3: 2041 case 3:
2050 sector_size = ICH_FLASH_SEG_SIZE_64K; 2042 sector_size = ICH_FLASH_SEG_SIZE_64K;
@@ -2056,7 +2048,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2056 2048
2057 /* Start with the base address, then add the sector offset. */ 2049 /* Start with the base address, then add the sector offset. */
2058 flash_linear_addr = hw->nvm.flash_base_addr; 2050 flash_linear_addr = hw->nvm.flash_base_addr;
2059 flash_linear_addr += (bank) ? (sector_size * iteration) : 0; 2051 flash_linear_addr += (bank) ? flash_bank_size : 0;
2060 2052
2061 for (j = 0; j < iteration ; j++) { 2053 for (j = 0; j < iteration ; j++) {
2062 do { 2054 do {
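The reworked e1000_acquire_swflag_ich8lan() above splits the acquire into two bounded waits: first wait for the current owner to drop the software flag, then set it and wait for the hardware to report it latched, unlocking nvm_mutex on every failure path. The general two-phase shape, sketched with invented names (not the driver's register interface):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int acquire_hw_flag(void __iomem *reg, u32 flag_bit, int timeout_ms)
{
	int t;

	/* Phase 1: wait for whoever owns the flag to release it. */
	for (t = 0; t < timeout_ms; t++) {
		if (!(readl(reg) & flag_bit))
			break;
		mdelay(1);
	}
	if (t == timeout_ms)
		return -EBUSY;

	/* Phase 2: request ownership and wait for the hardware to latch it. */
	writel(readl(reg) | flag_bit, reg);
	for (t = 0; t < timeout_ms; t++) {
		if (readl(reg) & flag_bit)
			return 0;
		mdelay(1);
	}

	/* Timed out: withdraw the request so the flag is not left half set. */
	writel(readl(reg) & ~flag_bit, reg);
	return -EBUSY;
}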
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 63415bb6f48f..fa92a683aefd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4538,8 +4538,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4538 /* Allow time for pending master requests to run */ 4538 /* Allow time for pending master requests to run */
4539 e1000e_disable_pcie_master(&adapter->hw); 4539 e1000e_disable_pcie_master(&adapter->hw);
4540 4540
4541 if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) && 4541 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
4542 !(hw->mac.ops.check_mng_mode(hw))) {
4543 /* enable wakeup by the PHY */ 4542 /* enable wakeup by the PHY */
4544 retval = e1000_init_phy_wakeup(adapter, wufc); 4543 retval = e1000_init_phy_wakeup(adapter, wufc);
4545 if (retval) 4544 if (retval)
@@ -4557,7 +4556,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4557 *enable_wake = !!wufc; 4556 *enable_wake = !!wufc;
4558 4557
4559 /* make sure adapter isn't asleep if manageability is enabled */ 4558 /* make sure adapter isn't asleep if manageability is enabled */
4560 if (adapter->flags & FLAG_MNG_PT_ENABLED) 4559 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
4560 (hw->mac.ops.check_mng_mode(hw)))
4561 *enable_wake = true; 4561 *enable_wake = true;
4562 4562
4563 if (adapter->hw.phy.type == e1000_phy_igp_3) 4563 if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4670,14 +4670,6 @@ static int e1000_resume(struct pci_dev *pdev)
4670 return err; 4670 return err;
4671 } 4671 }
4672 4672
4673 /* AER (Advanced Error Reporting) hooks */
4674 err = pci_enable_pcie_error_reporting(pdev);
4675 if (err) {
4676 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4677 "0x%x\n", err);
4678 /* non-fatal, continue */
4679 }
4680
4681 pci_set_master(pdev); 4673 pci_set_master(pdev);
4682 4674
4683 pci_enable_wake(pdev, PCI_D3hot, 0); 4675 pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4990,6 +4982,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4990 if (err) 4982 if (err)
4991 goto err_pci_reg; 4983 goto err_pci_reg;
4992 4984
4985 /* AER (Advanced Error Reporting) hooks */
4986 err = pci_enable_pcie_error_reporting(pdev);
4987 if (err) {
4988 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4989 "0x%x\n", err);
4990 /* non-fatal, continue */
4991 }
4992
4993 pci_set_master(pdev); 4993 pci_set_master(pdev);
4994 /* PCI config space info */ 4994 /* PCI config space info */
4995 err = pci_save_state(pdev); 4995 err = pci_save_state(pdev);
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index d4b98074b1b7..c9fd82d3a80d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -285,6 +285,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
285{ 285{
286 struct fec_enet_private *fep = netdev_priv(dev); 286 struct fec_enet_private *fep = netdev_priv(dev);
287 struct bufdesc *bdp; 287 struct bufdesc *bdp;
288 void *bufaddr;
288 unsigned short status; 289 unsigned short status;
289 unsigned long flags; 290 unsigned long flags;
290 291
@@ -312,7 +313,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
312 status &= ~BD_ENET_TX_STATS; 313 status &= ~BD_ENET_TX_STATS;
313 314
314 /* Set buffer length and buffer pointer */ 315 /* Set buffer length and buffer pointer */
315 bdp->cbd_bufaddr = __pa(skb->data); 316 bufaddr = skb->data;
316 bdp->cbd_datlen = skb->len; 317 bdp->cbd_datlen = skb->len;
317 318
318 /* 319 /*
@@ -320,11 +321,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
320 * 4-byte boundaries. Use bounce buffers to copy data 321 * 4-byte boundaries. Use bounce buffers to copy data
321 * and get it aligned. Ugh. 322 * and get it aligned. Ugh.
322 */ 323 */
323 if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { 324 if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
324 unsigned int index; 325 unsigned int index;
325 index = bdp - fep->tx_bd_base; 326 index = bdp - fep->tx_bd_base;
326 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); 327 memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
327 bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); 328 bufaddr = fep->tx_bounce[index];
328 } 329 }
329 330
330 /* Save skb pointer */ 331 /* Save skb pointer */
@@ -336,7 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
336 /* Push the data cache so the CPM does not get stale memory 337 /* Push the data cache so the CPM does not get stale memory
337 * data. 338 * data.
338 */ 339 */
339 bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, 340 bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
340 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); 341 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
341 342
342 /* Send it on its way. Tell FEC it's ready, interrupt when done, 343 /* Send it on its way. Tell FEC it's ready, interrupt when done,
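The fec.c hunk keeps the (possibly bounced) CPU address in a local variable and lets dma_map_single() produce the descriptor address, instead of storing __pa(skb->data) and then mapping skb->data even when the bounce buffer was used. The align-or-bounce idiom, sketched with invented names:

#include <linux/dma-mapping.h>
#include <linux/string.h>

#define CTRL_ALIGN_MASK	0x3	/* controller needs 4-byte aligned TX buffers */

static dma_addr_t map_tx_buf(struct device *dev, void *data, size_t len,
			     void *bounce)
{
	void *buf = data;

	if (((unsigned long)buf) & CTRL_ALIGN_MASK) {
		memcpy(bounce, data, len);	/* copy into an aligned bounce buffer */
		buf = bounce;
	}
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}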
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95c..c40113f58963 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
309{ 309{
310 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 310 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
311 struct bcom_fec_bd *bd; 311 struct bcom_fec_bd *bd;
312 unsigned long flags;
312 313
313 if (bcom_queue_full(priv->tx_dmatsk)) { 314 if (bcom_queue_full(priv->tx_dmatsk)) {
314 if (net_ratelimit()) 315 if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
316 return NETDEV_TX_BUSY; 317 return NETDEV_TX_BUSY;
317 } 318 }
318 319
319 spin_lock_irq(&priv->lock); 320 spin_lock_irqsave(&priv->lock, flags);
320 dev->trans_start = jiffies; 321 dev->trans_start = jiffies;
321 322
322 bd = (struct bcom_fec_bd *) 323 bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
332 netif_stop_queue(dev); 333 netif_stop_queue(dev);
333 } 334 }
334 335
335 spin_unlock_irq(&priv->lock); 336 spin_unlock_irqrestore(&priv->lock, flags);
336 337
337 return NETDEV_TX_OK; 338 return NETDEV_TX_OK;
338} 339}
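This change, like the macb, ixp2000 and mlx4 en_tx hunks further down, replaces the unconditional-IRQ locking forms (spin_lock_irq(), local_irq_disable()) with the _irqsave variants, so a transmit path entered with interrupts already disabled (netpoll/netconsole, for example) no longer re-enables them on unlock. The shape of the fix (sketch, invented names):

#include <linux/spinlock.h>

static void xmit_locked_sketch(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* remembers the caller's IRQ state */
	/* ... queue the frame on the hardware ring ... */
	spin_unlock_irqrestore(lock, flags);	/* restores it instead of forcing IRQs on */
}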
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index f8ffcbf0bc39..e212f2c5448b 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -936,6 +936,7 @@ int startup_gfar(struct net_device *dev)
936 struct gfar __iomem *regs = priv->regs; 936 struct gfar __iomem *regs = priv->regs;
937 int err = 0; 937 int err = 0;
938 u32 rctrl = 0; 938 u32 rctrl = 0;
939 u32 tctrl = 0;
939 u32 attrs = 0; 940 u32 attrs = 0;
940 941
941 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 942 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
@@ -1111,11 +1112,19 @@ int startup_gfar(struct net_device *dev)
1111 rctrl |= RCTRL_PADDING(priv->padding); 1112 rctrl |= RCTRL_PADDING(priv->padding);
1112 } 1113 }
1113 1114
1115 /* keep vlan related bits if it's enabled */
1116 if (priv->vlgrp) {
1117 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
1118 tctrl |= TCTRL_VLINS;
1119 }
1120
1114 /* Init rctrl based on our settings */ 1121 /* Init rctrl based on our settings */
1115 gfar_write(&priv->regs->rctrl, rctrl); 1122 gfar_write(&priv->regs->rctrl, rctrl);
1116 1123
1117 if (dev->features & NETIF_F_IP_CSUM) 1124 if (dev->features & NETIF_F_IP_CSUM)
1118 gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM); 1125 tctrl |= TCTRL_INIT_CSUM;
1126
1127 gfar_write(&priv->regs->tctrl, tctrl);
1119 1128
1120 /* Set the extraction length and index */ 1129 /* Set the extraction length and index */
1121 attrs = ATTRELI_EL(priv->rx_stash_size) | 1130 attrs = ATTRELI_EL(priv->rx_stash_size) |
@@ -1450,7 +1459,6 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1450 1459
1451 /* Enable VLAN tag extraction */ 1460 /* Enable VLAN tag extraction */
1452 tempval = gfar_read(&priv->regs->rctrl); 1461 tempval = gfar_read(&priv->regs->rctrl);
1453 tempval |= RCTRL_VLEX;
1454 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 1462 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1455 gfar_write(&priv->regs->rctrl, tempval); 1463 gfar_write(&priv->regs->rctrl, tempval);
1456 } else { 1464 } else {
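startup_gfar() rebuilds RCTRL and TCTRL from scratch, so the gianfar hunks fold the VLAN extraction/insertion bits back in when a VLAN group is registered and accumulate the checksum-offload bit into the same tctrl value rather than overwriting the register with a single feature. The value is assembled once and written once, roughly (sketch, placeholder bit values, not the real register layout):

#include <linux/types.h>

static u32 build_tctrl_sketch(bool vlan_insert, bool tx_csum)
{
	u32 tctrl = 0;

	if (vlan_insert)
		tctrl |= 0x0008;	/* placeholder for a TCTRL_VLINS-style bit */
	if (tx_csum)
		tctrl |= 0x0004;	/* placeholder for the csum-init bits */

	return tctrl;	/* single write, so neither feature clobbers the other */
}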
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index beb84213b671..f0f890803710 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
1305 1305
1306 free_irq(dev->emac_irq, dev); 1306 free_irq(dev->emac_irq, dev);
1307 1307
1308 netif_carrier_off(ndev);
1309
1308 return 0; 1310 return 0;
1309} 1311}
1310 1312
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index c4361d466597..ee1cff5c9b21 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,7 +23,6 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <linux/rtnetlink.h> 27#include <linux/rtnetlink.h>
29#include <linux/interrupt.h> 28#include <linux/interrupt.h>
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
205 .ndo_start_xmit = au1k_irda_hard_xmit, 204 .ndo_start_xmit = au1k_irda_hard_xmit,
206 .ndo_tx_timeout = au1k_tx_timeout, 205 .ndo_tx_timeout = au1k_tx_timeout,
207 .ndo_do_ioctl = au1k_irda_ioctl, 206 .ndo_do_ioctl = au1k_irda_ioctl,
208 .ndo_change_mtu = eth_change_mtu,
209 .ndo_validate_addr = eth_validate_addr,
210 .ndo_set_mac_address = eth_mac_addr,
211}; 207};
212 208
213static int au1k_irda_net_init(struct net_device *dev) 209static int au1k_irda_net_init(struct net_device *dev)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 3376a4f39e0a..77d10edefd25 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
803 .ndo_stop = pxa_irda_stop, 803 .ndo_stop = pxa_irda_stop,
804 .ndo_start_xmit = pxa_irda_hard_xmit, 804 .ndo_start_xmit = pxa_irda_hard_xmit,
805 .ndo_do_ioctl = pxa_irda_ioctl, 805 .ndo_do_ioctl = pxa_irda_ioctl,
806 .ndo_change_mtu = eth_change_mtu,
807 .ndo_validate_addr = eth_validate_addr,
808 .ndo_set_mac_address = eth_mac_addr,
809}; 806};
810 807
811static int pxa_irda_probe(struct platform_device *pdev) 808static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
830 if (!dev) 827 if (!dev)
831 goto err_mem_3; 828 goto err_mem_3;
832 829
830 SET_NETDEV_DEV(dev, &pdev->dev);
833 si = netdev_priv(dev); 831 si = netdev_priv(dev);
834 si->dev = &pdev->dev; 832 si->dev = &pdev->dev;
835 si->pdata = pdev->dev.platform_data; 833 si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 2aeb2e6aec1b..b039cb081e94 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,7 +24,6 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/slab.h> 27#include <linux/slab.h>
29#include <linux/rtnetlink.h> 28#include <linux/rtnetlink.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = {
881 .ndo_stop = sa1100_irda_stop, 880 .ndo_stop = sa1100_irda_stop,
882 .ndo_start_xmit = sa1100_irda_hard_xmit, 881 .ndo_start_xmit = sa1100_irda_hard_xmit,
883 .ndo_do_ioctl = sa1100_irda_ioctl, 882 .ndo_do_ioctl = sa1100_irda_ioctl,
884 .ndo_change_mtu = eth_change_mtu,
885 .ndo_validate_addr = eth_validate_addr,
886 .ndo_set_mac_address = eth_mac_addr,
887}; 883};
888 884
889static int sa1100_irda_probe(struct platform_device *pdev) 885static int sa1100_irda_probe(struct platform_device *pdev)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index d0883835b0c6..fe4f2b2bff96 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -115,7 +115,7 @@ static int __init w83977af_init(void)
115 115
116 IRDA_DEBUG(0, "%s()\n", __func__ ); 116 IRDA_DEBUG(0, "%s()\n", __func__ );
117 117
118 for (i=0; (io[i] < 2000) && (i < ARRAY_SIZE(dev_self)); i++) { 118 for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
119 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0) 119 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
120 return 0; 120 return 0;
121 } 121 }
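The w83977af_init() change evaluates the array-bounds test before indexing io[i], so the loop can no longer read one slot past the end of the module-parameter array when every configured port is in range. Relying on && short-circuiting for this is the standard idiom (standalone C sketch, invented values):

#include <stdio.h>

#define NR_PORTS 4

int main(void)
{
	int io[NR_PORTS] = { 0x180, 0x280, 0x3e8, 0x2f8 };
	int i;

	/* Check i first; short-circuit && keeps io[i] from being read
	 * once i has reached NR_PORTS. */
	for (i = 0; i < NR_PORTS && io[i] < 2000; i++)
		printf("would probe port 0x%x\n", io[i]);

	return 0;
}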
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e11d83d5852b..2c4dc8221dcd 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,6 +136,8 @@ struct ixgbe_ring {
136 136
137 u8 queue_index; /* needed for multiqueue queue management */ 137 u8 queue_index; /* needed for multiqueue queue management */
138 138
139#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
140 u8 flags; /* per ring feature flags */
139 u16 head; 141 u16 head;
140 u16 tail; 142 u16 tail;
141 143
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 79144e950a34..dff8dfac7ed9 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1948,6 +1948,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1948 struct ethtool_coalesce *ec) 1948 struct ethtool_coalesce *ec)
1949{ 1949{
1950 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1950 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1951 struct ixgbe_q_vector *q_vector;
1951 int i; 1952 int i;
1952 1953
1953 if (ec->tx_max_coalesced_frames_irq) 1954 if (ec->tx_max_coalesced_frames_irq)
@@ -1982,14 +1983,24 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1982 adapter->itr_setting = 0; 1983 adapter->itr_setting = 0;
1983 } 1984 }
1984 1985
1985 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 1986 /* MSI/MSIx Interrupt Mode */
1986 struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; 1987 if (adapter->flags &
1987 if (q_vector->txr_count && !q_vector->rxr_count) 1988 (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
1988 /* tx vector gets half the rate */ 1989 int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1989 q_vector->eitr = (adapter->eitr_param >> 1); 1990 for (i = 0; i < num_vectors; i++) {
1990 else 1991 q_vector = adapter->q_vector[i];
1991 /* rx only or mixed */ 1992 if (q_vector->txr_count && !q_vector->rxr_count)
1992 q_vector->eitr = adapter->eitr_param; 1993 /* tx vector gets half the rate */
1994 q_vector->eitr = (adapter->eitr_param >> 1);
1995 else
1996 /* rx only or mixed */
1997 q_vector->eitr = adapter->eitr_param;
1998 ixgbe_write_eitr(q_vector);
1999 }
2000 /* Legacy Interrupt Mode */
2001 } else {
2002 q_vector = adapter->q_vector[0];
2003 q_vector->eitr = adapter->eitr_param;
1993 ixgbe_write_eitr(q_vector); 2004 ixgbe_write_eitr(q_vector);
1994 } 2005 }
1995 2006
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index fa9f24e23683..28cf104e36cc 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -336,7 +336,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
336 /* return 0 to bypass going to ULD for DDPed data */ 336 /* return 0 to bypass going to ULD for DDPed data */
337 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) 337 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
338 rc = 0; 338 rc = 0;
339 else 339 else if (ddp->len)
340 rc = ddp->len; 340 rc = ddp->len;
341 } 341 }
342 342
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 110c65ab5cb5..77b0381a2b5c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -492,12 +492,12 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
492 492
493 skb_record_rx_queue(skb, ring->queue_index); 493 skb_record_rx_queue(skb, ring->queue_index);
494 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 494 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
495 if (adapter->vlgrp && is_vlan && (tag != 0)) 495 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
496 vlan_gro_receive(napi, adapter->vlgrp, tag, skb); 496 vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
497 else 497 else
498 napi_gro_receive(napi, skb); 498 napi_gro_receive(napi, skb);
499 } else { 499 } else {
500 if (adapter->vlgrp && is_vlan && (tag != 0)) 500 if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
501 vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 501 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
502 else 502 else
503 netif_rx(skb); 503 netif_rx(skb);
@@ -585,7 +585,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
585 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 585 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
586 586
587 if (!bi->page_dma && 587 if (!bi->page_dma &&
588 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 588 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
589 if (!bi->page) { 589 if (!bi->page) {
590 bi->page = alloc_page(GFP_ATOMIC); 590 bi->page = alloc_page(GFP_ATOMIC);
591 if (!bi->page) { 591 if (!bi->page) {
@@ -629,7 +629,7 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
629 } 629 }
630 /* Refresh the desc even if buffer_addrs didn't change because 630 /* Refresh the desc even if buffer_addrs didn't change because
631 * each write-back erases this info. */ 631 * each write-back erases this info. */
632 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 632 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
633 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 633 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
634 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 634 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
635 } else { 635 } else {
@@ -726,7 +726,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
726 break; 726 break;
727 (*work_done)++; 727 (*work_done)++;
728 728
729 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 729 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
730 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 730 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
731 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 731 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
732 IXGBE_RXDADV_HDRBUFLEN_SHIFT; 732 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -798,7 +798,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
798 rx_ring->stats.packets++; 798 rx_ring->stats.packets++;
799 rx_ring->stats.bytes += skb->len; 799 rx_ring->stats.bytes += skb->len;
800 } else { 800 } else {
801 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 801 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
802 rx_buffer_info->skb = next_buffer->skb; 802 rx_buffer_info->skb = next_buffer->skb;
803 rx_buffer_info->dma = next_buffer->dma; 803 rx_buffer_info->dma = next_buffer->dma;
804 next_buffer->skb = skb; 804 next_buffer->skb = skb;
@@ -1898,46 +1898,19 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1898 1898
1899#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1899#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1900 1900
1901static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index) 1901static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
1902 struct ixgbe_ring *rx_ring)
1902{ 1903{
1903 struct ixgbe_ring *rx_ring;
1904 u32 srrctl; 1904 u32 srrctl;
1905 int queue0 = 0; 1905 int index;
1906 unsigned long mask;
1907 struct ixgbe_ring_feature *feature = adapter->ring_feature; 1906 struct ixgbe_ring_feature *feature = adapter->ring_feature;
1908 1907
1909 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1908 index = rx_ring->reg_idx;
1910 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 1909 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1911 int dcb_i = feature[RING_F_DCB].indices; 1910 unsigned long mask;
1912 if (dcb_i == 8)
1913 queue0 = index >> 4;
1914 else if (dcb_i == 4)
1915 queue0 = index >> 5;
1916 else
1917 dev_err(&adapter->pdev->dev, "Invalid DCB "
1918 "configuration\n");
1919#ifdef IXGBE_FCOE
1920 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
1921 struct ixgbe_ring_feature *f;
1922
1923 rx_ring = &adapter->rx_ring[queue0];
1924 f = &adapter->ring_feature[RING_F_FCOE];
1925 if ((queue0 == 0) && (index > rx_ring->reg_idx))
1926 queue0 = f->mask + index -
1927 rx_ring->reg_idx - 1;
1928 }
1929#endif /* IXGBE_FCOE */
1930 } else {
1931 queue0 = index;
1932 }
1933 } else {
1934 mask = (unsigned long) feature[RING_F_RSS].mask; 1911 mask = (unsigned long) feature[RING_F_RSS].mask;
1935 queue0 = index & mask;
1936 index = index & mask; 1912 index = index & mask;
1937 } 1913 }
1938
1939 rx_ring = &adapter->rx_ring[queue0];
1940
1941 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 1914 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1942 1915
1943 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 1916 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1946,7 +1919,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1946 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 1919 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1947 IXGBE_SRRCTL_BSIZEHDR_MASK; 1920 IXGBE_SRRCTL_BSIZEHDR_MASK;
1948 1921
1949 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1922 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1950#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 1923#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
1951 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1924 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1952#else 1925#else
@@ -2002,6 +1975,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2002{ 1975{
2003 u64 rdba; 1976 u64 rdba;
2004 struct ixgbe_hw *hw = &adapter->hw; 1977 struct ixgbe_hw *hw = &adapter->hw;
1978 struct ixgbe_ring *rx_ring;
2005 struct net_device *netdev = adapter->netdev; 1979 struct net_device *netdev = adapter->netdev;
2006 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1980 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2007 int i, j; 1981 int i, j;
@@ -2018,11 +1992,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2018 /* Decide whether to use packet split mode or not */ 1992 /* Decide whether to use packet split mode or not */
2019 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1993 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
2020 1994
2021#ifdef IXGBE_FCOE
2022 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2023 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
2024#endif /* IXGBE_FCOE */
2025
2026 /* Set the RX buffer length according to the mode */ 1995 /* Set the RX buffer length according to the mode */
2027 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1996 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
2028 rx_buf_len = IXGBE_RX_HDR_SIZE; 1997 rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -2070,29 +2039,35 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2070 * the Base and Length of the Rx Descriptor Ring 2039 * the Base and Length of the Rx Descriptor Ring
2071 */ 2040 */
2072 for (i = 0; i < adapter->num_rx_queues; i++) { 2041 for (i = 0; i < adapter->num_rx_queues; i++) {
2073 rdba = adapter->rx_ring[i].dma; 2042 rx_ring = &adapter->rx_ring[i];
2074 j = adapter->rx_ring[i].reg_idx; 2043 rdba = rx_ring->dma;
2044 j = rx_ring->reg_idx;
2075 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32))); 2045 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2076 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); 2046 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2077 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen); 2047 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2078 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); 2048 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2079 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); 2049 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2080 adapter->rx_ring[i].head = IXGBE_RDH(j); 2050 rx_ring->head = IXGBE_RDH(j);
2081 adapter->rx_ring[i].tail = IXGBE_RDT(j); 2051 rx_ring->tail = IXGBE_RDT(j);
2082 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 2052 rx_ring->rx_buf_len = rx_buf_len;
2053
2054 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2055 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2083 2056
2084#ifdef IXGBE_FCOE 2057#ifdef IXGBE_FCOE
2085 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 2058 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
2086 struct ixgbe_ring_feature *f; 2059 struct ixgbe_ring_feature *f;
2087 f = &adapter->ring_feature[RING_F_FCOE]; 2060 f = &adapter->ring_feature[RING_F_FCOE];
2088 if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) && 2061 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2089 (i >= f->mask) && (i < f->mask + f->indices)) 2062 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2090 adapter->rx_ring[i].rx_buf_len = 2063 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2091 IXGBE_FCOE_JUMBO_FRAME_SIZE; 2064 rx_ring->rx_buf_len =
2065 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2066 }
2092 } 2067 }
2093 2068
2094#endif /* IXGBE_FCOE */ 2069#endif /* IXGBE_FCOE */
2095 ixgbe_configure_srrctl(adapter, j); 2070 ixgbe_configure_srrctl(adapter, rx_ring);
2096 } 2071 }
2097 2072
2098 if (hw->mac.type == ixgbe_mac_82598EB) { 2073 if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -2168,7 +2143,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2168 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2143 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2169 /* Enable 82599 HW-RSC */ 2144 /* Enable 82599 HW-RSC */
2170 for (i = 0; i < adapter->num_rx_queues; i++) { 2145 for (i = 0; i < adapter->num_rx_queues; i++) {
2171 j = adapter->rx_ring[i].reg_idx; 2146 rx_ring = &adapter->rx_ring[i];
2147 j = rx_ring->reg_idx;
2172 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2148 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2173 rscctrl |= IXGBE_RSCCTL_RSCEN; 2149 rscctrl |= IXGBE_RSCCTL_RSCEN;
2174 /* 2150 /*
@@ -2176,7 +2152,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2176 * total size of max desc * buf_len is not greater 2152 * total size of max desc * buf_len is not greater
2177 * than 65535 2153 * than 65535
2178 */ 2154 */
2179 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 2155 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2180#if (MAX_SKB_FRAGS > 16) 2156#if (MAX_SKB_FRAGS > 16)
2181 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2157 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2182#elif (MAX_SKB_FRAGS > 8) 2158#elif (MAX_SKB_FRAGS > 8)
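The ixgbe changes move the packet-split decision from the adapter-wide IXGBE_FLAG_RX_PS_ENABLED flag to a per-ring IXGBE_RING_RX_PS_ENABLED bit, so the FCoE queues can drop header split (and use a jumbo buffer length) while the remaining RX queues keep it. Carrying the feature as a per-ring bit looks roughly like this (sketch, invented names and placeholder sizes):

#include <linux/types.h>

#define RING_RX_PS_ENABLED	((u8)1)

struct rx_ring_sketch {
	u8 flags;		/* per-ring feature bits */
	u16 rx_buf_len;
};

static void configure_ring_sketch(struct rx_ring_sketch *ring,
				  bool adapter_ps, bool is_fcoe_ring)
{
	if (adapter_ps)
		ring->flags |= RING_RX_PS_ENABLED;

	if (is_fcoe_ring) {
		/* FCoE wants one large contiguous buffer per descriptor. */
		ring->flags &= ~RING_RX_PS_ENABLED;
		ring->rx_buf_len = 3072;	/* placeholder jumbo size */
	}
}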
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 2a0174b62e96..92fb8235c766 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
41 struct ixpdev_priv *ip = netdev_priv(dev); 41 struct ixpdev_priv *ip = netdev_priv(dev);
42 struct ixpdev_tx_desc *desc; 42 struct ixpdev_tx_desc *desc;
43 int entry; 43 int entry;
44 unsigned long flags;
44 45
45 if (unlikely(skb->len > PAGE_SIZE)) { 46 if (unlikely(skb->len > PAGE_SIZE)) {
46 /* @@@ Count drops. */ 47 /* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
63 64
64 dev->trans_start = jiffies; 65 dev->trans_start = jiffies;
65 66
66 local_irq_disable(); 67 local_irq_save(flags);
67 ip->tx_queue_entries++; 68 ip->tx_queue_entries++;
68 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) 69 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
69 netif_stop_queue(dev); 70 netif_stop_queue(dev);
70 local_irq_enable(); 71 local_irq_restore(flags);
71 72
72 return 0; 73 return 0;
73} 74}
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 5b5c25368d1e..e3601cf3f931 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
620 dma_addr_t mapping; 620 dma_addr_t mapping;
621 unsigned int len, entry; 621 unsigned int len, entry;
622 u32 ctrl; 622 u32 ctrl;
623 unsigned long flags;
623 624
624#ifdef DEBUG 625#ifdef DEBUG
625 int i; 626 int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
635#endif 636#endif
636 637
637 len = skb->len; 638 len = skb->len;
638 spin_lock_irq(&bp->lock); 639 spin_lock_irqsave(&bp->lock, flags);
639 640
640 /* This is a hard error, log it. */ 641 /* This is a hard error, log it. */
641 if (TX_BUFFS_AVAIL(bp) < 1) { 642 if (TX_BUFFS_AVAIL(bp) < 1) {
642 netif_stop_queue(dev); 643 netif_stop_queue(dev);
643 spin_unlock_irq(&bp->lock); 644 spin_unlock_irqrestore(&bp->lock, flags);
644 dev_err(&bp->pdev->dev, 645 dev_err(&bp->pdev->dev,
645 "BUG! Tx Ring full when queue awake!\n"); 646 "BUG! Tx Ring full when queue awake!\n");
646 dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", 647 dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
674 if (TX_BUFFS_AVAIL(bp) < 1) 675 if (TX_BUFFS_AVAIL(bp) < 1)
675 netif_stop_queue(dev); 676 netif_stop_queue(dev);
676 677
677 spin_unlock_irq(&bp->lock); 678 spin_unlock_irqrestore(&bp->lock, flags);
678 679
679 dev->trans_start = jiffies; 680 dev->trans_start = jiffies;
680 681
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 91bdfdfd431f..3ac0404d0d11 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -506,8 +506,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
506 PCI_DMA_FROMDEVICE); 506 PCI_DMA_FROMDEVICE);
507 } 507 }
508 /* Adjust size of last fragment to match actual length */ 508 /* Adjust size of last fragment to match actual length */
509 skb_frags_rx[nr - 1].size = length - 509 if (nr > 0)
510 priv->frag_info[nr - 1].frag_prefix_size; 510 skb_frags_rx[nr - 1].size = length -
511 priv->frag_info[nr - 1].frag_prefix_size;
511 return nr; 512 return nr;
512 513
513fail: 514fail:
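The mlx4_en_complete_rx_desc() fix only touches skb_frags_rx[nr - 1] when at least one fragment actually completed; with nr == 0 the old code indexed element -1 of both arrays. Guarding a last-element fixup this way is the general pattern (sketch, invented names):

#include <stddef.h>

static void trim_last_frag(size_t *frag_sizes, int nr,
			   size_t total_len, size_t prefix_of_last)
{
	/* Only the last fragment's size needs trimming, and only if one exists. */
	if (nr > 0)
		frag_sizes[nr - 1] = total_len - prefix_of_last;
}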
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5a88b3f57693..62208401c4df 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
437{ 437{
438 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind]; 438 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
439 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind]; 439 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
440 unsigned long flags;
440 441
441 /* If we don't have a pending timer, set one up to catch our recent 442 /* If we don't have a pending timer, set one up to catch our recent
442 post in case the interface becomes idle */ 443 post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
445 446
446 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ 447 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
447 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) 448 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
448 if (spin_trylock_irq(&ring->comp_lock)) { 449 if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
449 mlx4_en_process_tx_cq(priv->dev, cq); 450 mlx4_en_process_tx_cq(priv->dev, cq);
450 spin_unlock_irq(&ring->comp_lock); 451 spin_unlock_irqrestore(&ring->comp_lock, flags);
451 } 452 }
452} 453}
453 454
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index f86e05047d19..a9c1fcca5e75 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1254,7 +1254,7 @@ struct netxen_adapter {
1254 u8 mc_enabled; 1254 u8 mc_enabled;
1255 u8 max_mc_count; 1255 u8 max_mc_count;
1256 u8 rss_supported; 1256 u8 rss_supported;
1257 u8 resv2; 1257 u8 link_changed;
1258 u32 resv3; 1258 u32 resv3;
1259 1259
1260 u8 has_link_events; 1260 u8 has_link_events;
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7acf204e38c9..5d3343ef3d86 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,13 +184,6 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
184 kfree(recv_ctx->rds_rings); 184 kfree(recv_ctx->rds_rings);
185 185
186skip_rds: 186skip_rds:
187 if (recv_ctx->sds_rings == NULL)
188 goto skip_sds;
189
190 for(ring = 0; ring < adapter->max_sds_rings; ring++)
191 recv_ctx->sds_rings[ring].consumer = 0;
192
193skip_sds:
194 if (adapter->tx_ring == NULL) 187 if (adapter->tx_ring == NULL)
195 return; 188 return;
196 189
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 3cd8cfcf627b..28f270f5ac78 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -94,10 +94,6 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
94 94
95MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); 95MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
96 96
97static struct workqueue_struct *netxen_workq;
98#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
99#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
100
101static void netxen_watchdog(unsigned long); 97static void netxen_watchdog(unsigned long);
102 98
103static uint32_t crb_cmd_producer[4] = { 99static uint32_t crb_cmd_producer[4] = {
@@ -171,6 +167,8 @@ netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
171{ 167{
172 if (recv_ctx->sds_rings != NULL) 168 if (recv_ctx->sds_rings != NULL)
173 kfree(recv_ctx->sds_rings); 169 kfree(recv_ctx->sds_rings);
170
171 recv_ctx->sds_rings = NULL;
174} 172}
175 173
176static int 174static int
@@ -193,6 +191,21 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
193} 191}
194 192
195static void 193static void
194netxen_napi_del(struct netxen_adapter *adapter)
195{
196 int ring;
197 struct nx_host_sds_ring *sds_ring;
198 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
205 netxen_free_sds_rings(&adapter->recv_ctx);
206}
207
208static void
196netxen_napi_enable(struct netxen_adapter *adapter) 209netxen_napi_enable(struct netxen_adapter *adapter)
197{ 210{
198 int ring; 211 int ring;
@@ -260,7 +273,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
260 change = 0; 273 change = 0;
261 274
262 shift = NXRD32(adapter, CRB_DMA_SHIFT); 275 shift = NXRD32(adapter, CRB_DMA_SHIFT);
263 if (shift >= 32) 276 if (shift > 32)
264 return 0; 277 return 0;
265 278
266 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9)) 279 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
@@ -272,7 +285,7 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
272 old_mask = pdev->dma_mask; 285 old_mask = pdev->dma_mask;
273 old_cmask = pdev->dev.coherent_dma_mask; 286 old_cmask = pdev->dev.coherent_dma_mask;
274 287
275 mask = (1ULL<<(32+shift)) - 1; 288 mask = DMA_BIT_MASK(32+shift);
276 289
277 err = pci_set_dma_mask(pdev, mask); 290 err = pci_set_dma_mask(pdev, mask);
278 if (err) 291 if (err)
@@ -880,7 +893,6 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
880 spin_unlock(&adapter->tx_clean_lock); 893 spin_unlock(&adapter->tx_clean_lock);
881 894
882 del_timer_sync(&adapter->watchdog_timer); 895 del_timer_sync(&adapter->watchdog_timer);
883 FLUSH_SCHEDULED_WORK();
884} 896}
885 897
886 898
@@ -894,10 +906,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
894 struct nx_host_tx_ring *tx_ring; 906 struct nx_host_tx_ring *tx_ring;
895 907
896 err = netxen_init_firmware(adapter); 908 err = netxen_init_firmware(adapter);
897 if (err != 0) { 909 if (err)
898 printk(KERN_ERR "Failed to init firmware\n"); 910 return err;
899 return -EIO; 911
900 } 912 err = netxen_napi_add(adapter, netdev);
913 if (err)
914 return err;
901 915
902 if (adapter->fw_major < 4) 916 if (adapter->fw_major < 4)
903 adapter->max_rds_rings = 3; 917 adapter->max_rds_rings = 3;
@@ -961,6 +975,7 @@ netxen_nic_detach(struct netxen_adapter *adapter)
961 netxen_free_hw_resources(adapter); 975 netxen_free_hw_resources(adapter);
962 netxen_release_rx_buffers(adapter); 976 netxen_release_rx_buffers(adapter);
963 netxen_nic_free_irq(adapter); 977 netxen_nic_free_irq(adapter);
978 netxen_napi_del(adapter);
964 netxen_free_sw_resources(adapter); 979 netxen_free_sw_resources(adapter);
965 980
966 adapter->is_up = 0; 981 adapter->is_up = 0;
@@ -1105,9 +1120,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1105 1120
1106 netdev->irq = adapter->msix_entries[0].vector; 1121 netdev->irq = adapter->msix_entries[0].vector;
1107 1122
1108 if (netxen_napi_add(adapter, netdev))
1109 goto err_out_disable_msi;
1110
1111 init_timer(&adapter->watchdog_timer); 1123 init_timer(&adapter->watchdog_timer);
1112 adapter->watchdog_timer.function = &netxen_watchdog; 1124 adapter->watchdog_timer.function = &netxen_watchdog;
1113 adapter->watchdog_timer.data = (unsigned long)adapter; 1125 adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -1177,6 +1189,9 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1177 1189
1178 unregister_netdev(netdev); 1190 unregister_netdev(netdev);
1179 1191
1192 cancel_work_sync(&adapter->watchdog_task);
1193 cancel_work_sync(&adapter->tx_timeout_task);
1194
1180 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) { 1195 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
1181 netxen_nic_detach(adapter); 1196 netxen_nic_detach(adapter);
1182 } 1197 }
@@ -1185,7 +1200,6 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1185 netxen_free_adapter_offload(adapter); 1200 netxen_free_adapter_offload(adapter);
1186 1201
1187 netxen_teardown_intr(adapter); 1202 netxen_teardown_intr(adapter);
1188 netxen_free_sds_rings(&adapter->recv_ctx);
1189 1203
1190 netxen_cleanup_pci_map(adapter); 1204 netxen_cleanup_pci_map(adapter);
1191 1205
@@ -1211,6 +1225,9 @@ netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
1211 if (netif_running(netdev)) 1225 if (netif_running(netdev))
1212 netxen_nic_down(adapter, netdev); 1226 netxen_nic_down(adapter, netdev);
1213 1227
1228 cancel_work_sync(&adapter->watchdog_task);
1229 cancel_work_sync(&adapter->tx_timeout_task);
1230
1214 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) 1231 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
1215 netxen_nic_detach(adapter); 1232 netxen_nic_detach(adapter);
1216 1233
@@ -1549,11 +1566,6 @@ static int netxen_nic_check_temp(struct netxen_adapter *adapter)
1549 "%s: Device temperature %d degrees C exceeds" 1566 "%s: Device temperature %d degrees C exceeds"
1550 " maximum allowed. Hardware has been shut down.\n", 1567 " maximum allowed. Hardware has been shut down.\n",
1551 netdev->name, temp_val); 1568 netdev->name, temp_val);
1552
1553 netif_device_detach(netdev);
1554 netxen_nic_down(adapter, netdev);
1555 netxen_nic_detach(adapter);
1556
1557 rv = 1; 1569 rv = 1;
1558 } else if (temp_state == NX_TEMP_WARN) { 1570 } else if (temp_state == NX_TEMP_WARN) {
1559 if (adapter->temp == NX_TEMP_NORMAL) { 1571 if (adapter->temp == NX_TEMP_NORMAL) {
@@ -1587,10 +1599,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1587 netif_carrier_off(netdev); 1599 netif_carrier_off(netdev);
1588 netif_stop_queue(netdev); 1600 netif_stop_queue(netdev);
1589 } 1601 }
1590 1602 adapter->link_changed = !adapter->has_link_events;
1591 if (!adapter->has_link_events)
1592 netxen_nic_set_link_parameters(adapter);
1593
1594 } else if (!adapter->ahw.linkup && linkup) { 1603 } else if (!adapter->ahw.linkup && linkup) {
1595 printk(KERN_INFO "%s: %s NIC Link is up\n", 1604 printk(KERN_INFO "%s: %s NIC Link is up\n",
1596 netxen_nic_driver_name, netdev->name); 1605 netxen_nic_driver_name, netdev->name);
@@ -1599,9 +1608,7 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
1599 netif_carrier_on(netdev); 1608 netif_carrier_on(netdev);
1600 netif_wake_queue(netdev); 1609 netif_wake_queue(netdev);
1601 } 1610 }
1602 1611 adapter->link_changed = !adapter->has_link_events;
1603 if (!adapter->has_link_events)
1604 netxen_nic_set_link_parameters(adapter);
1605 } 1612 }
1606} 1613}
1607 1614
@@ -1628,11 +1635,36 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
1628 netxen_advert_link_change(adapter, linkup); 1635 netxen_advert_link_change(adapter, linkup);
1629} 1636}
1630 1637
1638static void netxen_nic_thermal_shutdown(struct netxen_adapter *adapter)
1639{
1640 struct net_device *netdev = adapter->netdev;
1641
1642 netif_device_detach(netdev);
1643 netxen_nic_down(adapter, netdev);
1644 netxen_nic_detach(adapter);
1645}
1646
1631static void netxen_watchdog(unsigned long v) 1647static void netxen_watchdog(unsigned long v)
1632{ 1648{
1633 struct netxen_adapter *adapter = (struct netxen_adapter *)v; 1649 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
1634 1650
1635 SCHEDULE_WORK(&adapter->watchdog_task); 1651 if (netxen_nic_check_temp(adapter))
1652 goto do_sched;
1653
1654 if (!adapter->has_link_events) {
1655 netxen_nic_handle_phy_intr(adapter);
1656
1657 if (adapter->link_changed)
1658 goto do_sched;
1659 }
1660
1661 if (netif_running(adapter->netdev))
1662 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1663
1664 return;
1665
1666do_sched:
1667 schedule_work(&adapter->watchdog_task);
1636} 1668}
1637 1669
1638void netxen_watchdog_task(struct work_struct *work) 1670void netxen_watchdog_task(struct work_struct *work)
@@ -1640,11 +1672,13 @@ void netxen_watchdog_task(struct work_struct *work)
1640 struct netxen_adapter *adapter = 1672 struct netxen_adapter *adapter =
1641 container_of(work, struct netxen_adapter, watchdog_task); 1673 container_of(work, struct netxen_adapter, watchdog_task);
1642 1674
1643 if (netxen_nic_check_temp(adapter)) 1675 if (adapter->temp == NX_TEMP_PANIC) {
1676 netxen_nic_thermal_shutdown(adapter);
1644 return; 1677 return;
1678 }
1645 1679
1646 if (!adapter->has_link_events) 1680 if (adapter->link_changed)
1647 netxen_nic_handle_phy_intr(adapter); 1681 netxen_nic_set_link_parameters(adapter);
1648 1682
1649 if (netif_running(adapter->netdev)) 1683 if (netif_running(adapter->netdev))
1650 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1684 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
@@ -1652,9 +1686,8 @@ void netxen_watchdog_task(struct work_struct *work)
1652 1686
1653static void netxen_tx_timeout(struct net_device *netdev) 1687static void netxen_tx_timeout(struct net_device *netdev)
1654{ 1688{
1655 struct netxen_adapter *adapter = (struct netxen_adapter *) 1689 struct netxen_adapter *adapter = netdev_priv(netdev);
1656 netdev_priv(netdev); 1690 schedule_work(&adapter->tx_timeout_task);
1657 SCHEDULE_WORK(&adapter->tx_timeout_task);
1658} 1691}
1659 1692
1660static void netxen_tx_timeout_task(struct work_struct *work) 1693static void netxen_tx_timeout_task(struct work_struct *work)
@@ -1811,9 +1844,6 @@ static int __init netxen_init_module(void)
1811{ 1844{
1812 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 1845 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
1813 1846
1814 if ((netxen_workq = create_singlethread_workqueue("netxen")) == NULL)
1815 return -ENOMEM;
1816
1817 return pci_register_driver(&netxen_driver); 1847 return pci_register_driver(&netxen_driver);
1818} 1848}
1819 1849
@@ -1822,7 +1852,6 @@ module_init(netxen_init_module);
1822static void __exit netxen_exit_module(void) 1852static void __exit netxen_exit_module(void)
1823{ 1853{
1824 pci_unregister_driver(&netxen_driver); 1854 pci_unregister_driver(&netxen_driver);
1825 destroy_workqueue(netxen_workq);
1826} 1855}
1827 1856
1828module_exit(netxen_exit_module); 1857module_exit(netxen_exit_module);
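Note: the netxen hunks above drop the driver-private workqueue and its FLUSH_SCHEDULED_WORK() wrapper in favour of the shared kernel workqueue, cancel the individual work items on remove/suspend, and replace the open-coded mask computation with DMA_BIT_MASK() (which expands to (1ULL << n) - 1 for n < 64). A minimal sketch of the workqueue side, using a made-up adapter structure rather than the real netxen one:

#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

struct demo_adapter {				/* hypothetical adapter, for illustration only */
	struct work_struct watchdog_task;
};

static void demo_watchdog_task(struct work_struct *work)
{
	/* deferred work now runs on the shared kernel workqueue */
}

static void demo_init(struct demo_adapter *ad)
{
	INIT_WORK(&ad->watchdog_task, demo_watchdog_task);
	schedule_work(&ad->watchdog_task);	/* instead of queue_work(private_wq, ...) */
}

static void demo_remove(struct demo_adapter *ad)
{
	/* waits for a queued or running instance; replaces flushing the whole queue */
	cancel_work_sync(&ad->watchdog_task);
}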
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index a646a445fda9..23e1a0750fe0 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1839,7 +1839,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1839 lp->chip_version = chip_version; 1839 lp->chip_version = chip_version;
1840 lp->msg_enable = pcnet32_debug; 1840 lp->msg_enable = pcnet32_debug;
1841 if ((cards_found >= MAX_UNITS) 1841 if ((cards_found >= MAX_UNITS)
1842 || (options[cards_found] > sizeof(options_mapping))) 1842 || (options[cards_found] >= sizeof(options_mapping)))
1843 lp->options = PCNET32_PORT_ASEL; 1843 lp->options = PCNET32_PORT_ASEL;
1844 else 1844 else
1845 lp->options = options_mapping[options[cards_found]]; 1845 lp->options = options_mapping[options[cards_found]];
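Note: the pcnet32 fix above tightens the bounds check on the options[] module parameter before it is used to index options_mapping[]. When the intended limit is the table's element count rather than its size in bytes, ARRAY_SIZE() is the usual idiom; a small sketch with a made-up table:

#include <linux/kernel.h>

static const int demo_options_mapping[] = { 0, 1, 2, 3 };	/* made-up lookup table */

static int demo_lookup(unsigned int idx)
{
	/* ARRAY_SIZE() counts elements; sizeof() would count bytes */
	if (idx >= ARRAY_SIZE(demo_options_mapping))
		return 0;			/* fall back to a safe default */
	return demo_options_mapping[idx];
}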
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1c70e999cc50..7567f510eff5 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
196/* this enables an interrupt in the interrupt mask register */ 196/* this enables an interrupt in the interrupt mask register */
197#define SMC_ENABLE_INT(lp, x) do { \ 197#define SMC_ENABLE_INT(lp, x) do { \
198 unsigned char mask; \ 198 unsigned char mask; \
199 spin_lock_irq(&lp->lock); \ 199 unsigned long smc_enable_flags; \
200 spin_lock_irqsave(&lp->lock, smc_enable_flags); \
200 mask = SMC_GET_INT_MASK(lp); \ 201 mask = SMC_GET_INT_MASK(lp); \
201 mask |= (x); \ 202 mask |= (x); \
202 SMC_SET_INT_MASK(lp, mask); \ 203 SMC_SET_INT_MASK(lp, mask); \
203 spin_unlock_irq(&lp->lock); \ 204 spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \
204} while (0) 205} while (0)
205 206
206/* this disables an interrupt from the interrupt mask register */ 207/* this disables an interrupt from the interrupt mask register */
207#define SMC_DISABLE_INT(lp, x) do { \ 208#define SMC_DISABLE_INT(lp, x) do { \
208 unsigned char mask; \ 209 unsigned char mask; \
209 spin_lock_irq(&lp->lock); \ 210 unsigned long smc_disable_flags; \
211 spin_lock_irqsave(&lp->lock, smc_disable_flags); \
210 mask = SMC_GET_INT_MASK(lp); \ 212 mask = SMC_GET_INT_MASK(lp); \
211 mask &= ~(x); \ 213 mask &= ~(x); \
212 SMC_SET_INT_MASK(lp, mask); \ 214 SMC_SET_INT_MASK(lp, mask); \
213 spin_unlock_irq(&lp->lock); \ 215 spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \
214} while (0) 216} while (0)
215 217
216/* 218/*
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev)
520 * any other concurrent access and C would always interrupt B. But life 522 * any other concurrent access and C would always interrupt B. But life
521 * isn't that easy in a SMP world... 523 * isn't that easy in a SMP world...
522 */ 524 */
523#define smc_special_trylock(lock) \ 525#define smc_special_trylock(lock, flags) \
524({ \ 526({ \
525 int __ret; \ 527 int __ret; \
526 local_irq_disable(); \ 528 local_irq_save(flags); \
527 __ret = spin_trylock(lock); \ 529 __ret = spin_trylock(lock); \
528 if (!__ret) \ 530 if (!__ret) \
529 local_irq_enable(); \ 531 local_irq_restore(flags); \
530 __ret; \ 532 __ret; \
531}) 533})
532#define smc_special_lock(lock) spin_lock_irq(lock) 534#define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
533#define smc_special_unlock(lock) spin_unlock_irq(lock) 535#define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
534#else 536#else
535#define smc_special_trylock(lock) (1) 537#define smc_special_trylock(lock, flags) (1)
536#define smc_special_lock(lock) do { } while (0) 538#define smc_special_lock(lock, flags) do { } while (0)
537#define smc_special_unlock(lock) do { } while (0) 539#define smc_special_unlock(lock, flags) do { } while (0)
538#endif 540#endif
539 541
540/* 542/*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
548 struct sk_buff *skb; 550 struct sk_buff *skb;
549 unsigned int packet_no, len; 551 unsigned int packet_no, len;
550 unsigned char *buf; 552 unsigned char *buf;
553 unsigned long flags;
551 554
552 DBG(3, "%s: %s\n", dev->name, __func__); 555 DBG(3, "%s: %s\n", dev->name, __func__);
553 556
554 if (!smc_special_trylock(&lp->lock)) { 557 if (!smc_special_trylock(&lp->lock, flags)) {
555 netif_stop_queue(dev); 558 netif_stop_queue(dev);
556 tasklet_schedule(&lp->tx_task); 559 tasklet_schedule(&lp->tx_task);
557 return; 560 return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
559 562
560 skb = lp->pending_tx_skb; 563 skb = lp->pending_tx_skb;
561 if (unlikely(!skb)) { 564 if (unlikely(!skb)) {
562 smc_special_unlock(&lp->lock); 565 smc_special_unlock(&lp->lock, flags);
563 return; 566 return;
564 } 567 }
565 lp->pending_tx_skb = NULL; 568 lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
569 printk("%s: Memory allocation failed.\n", dev->name); 572 printk("%s: Memory allocation failed.\n", dev->name);
570 dev->stats.tx_errors++; 573 dev->stats.tx_errors++;
571 dev->stats.tx_fifo_errors++; 574 dev->stats.tx_fifo_errors++;
572 smc_special_unlock(&lp->lock); 575 smc_special_unlock(&lp->lock, flags);
573 goto done; 576 goto done;
574 } 577 }
575 578
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
608 611
609 /* queue the packet for TX */ 612 /* queue the packet for TX */
610 SMC_SET_MMU_CMD(lp, MC_ENQUEUE); 613 SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
611 smc_special_unlock(&lp->lock); 614 smc_special_unlock(&lp->lock, flags);
612 615
613 dev->trans_start = jiffies; 616 dev->trans_start = jiffies;
614 dev->stats.tx_packets++; 617 dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
633 struct smc_local *lp = netdev_priv(dev); 636 struct smc_local *lp = netdev_priv(dev);
634 void __iomem *ioaddr = lp->base; 637 void __iomem *ioaddr = lp->base;
635 unsigned int numPages, poll_count, status; 638 unsigned int numPages, poll_count, status;
639 unsigned long flags;
636 640
637 DBG(3, "%s: %s\n", dev->name, __func__); 641 DBG(3, "%s: %s\n", dev->name, __func__);
638 642
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
658 return 0; 662 return 0;
659 } 663 }
660 664
661 smc_special_lock(&lp->lock); 665 smc_special_lock(&lp->lock, flags);
662 666
663 /* now, try to allocate the memory */ 667 /* now, try to allocate the memory */
664 SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages); 668 SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
676 } 680 }
677 } while (--poll_count); 681 } while (--poll_count);
678 682
679 smc_special_unlock(&lp->lock); 683 smc_special_unlock(&lp->lock, flags);
680 684
681 lp->pending_tx_skb = skb; 685 lp->pending_tx_skb = skb;
682 if (!poll_count) { 686 if (!poll_count) {
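Note: the smc91x changes above convert the interrupt-mask helpers and the TX path from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants, so they are safe to call from contexts where interrupts may already be disabled. The general pattern, shown on a stand-in lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);		/* stand-in for lp->lock */

static void demo_touch_hw(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* saves the current IRQ state */
	/* ... access registers / shared state ... */
	spin_unlock_irqrestore(&demo_lock, flags);	/* restores it instead of forcing IRQs back on */
}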
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 99a63649f4fc..4cf9a6588751 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -652,8 +652,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
652 int entry; 652 int entry;
653 u32 flag; 653 u32 flag;
654 dma_addr_t mapping; 654 dma_addr_t mapping;
655 unsigned long flags;
655 656
656 spin_lock_irq(&tp->lock); 657 spin_lock_irqsave(&tp->lock, flags);
657 658
658 /* Calculate the next Tx descriptor entry. */ 659 /* Calculate the next Tx descriptor entry. */
659 entry = tp->cur_tx % TX_RING_SIZE; 660 entry = tp->cur_tx % TX_RING_SIZE;
@@ -688,7 +689,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
688 /* Trigger an immediate transmit demand. */ 689 /* Trigger an immediate transmit demand. */
689 iowrite32(0, tp->base_addr + CSR1); 690 iowrite32(0, tp->base_addr + CSR1);
690 691
691 spin_unlock_irq(&tp->lock); 692 spin_unlock_irqrestore(&tp->lock, flags);
692 693
693 dev->trans_start = jiffies; 694 dev->trans_start = jiffies;
694 695
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 027f7aba26af..42b6c6319bc2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1048,20 +1048,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1048 return err; 1048 return err;
1049} 1049}
1050 1050
1051static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr) 1051static int tun_get_iff(struct net *net, struct tun_struct *tun,
1052 struct ifreq *ifr)
1052{ 1053{
1053 struct tun_struct *tun = tun_get(file);
1054
1055 if (!tun)
1056 return -EBADFD;
1057
1058 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); 1054 DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);
1059 1055
1060 strcpy(ifr->ifr_name, tun->dev->name); 1056 strcpy(ifr->ifr_name, tun->dev->name);
1061 1057
1062 ifr->ifr_flags = tun_flags(tun); 1058 ifr->ifr_flags = tun_flags(tun);
1063 1059
1064 tun_put(tun);
1065 return 0; 1060 return 0;
1066} 1061}
1067 1062
@@ -1105,8 +1100,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
1105 return 0; 1100 return 0;
1106} 1101}
1107 1102
1108static int tun_chr_ioctl(struct inode *inode, struct file *file, 1103static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1109 unsigned int cmd, unsigned long arg) 1104 unsigned long arg)
1110{ 1105{
1111 struct tun_file *tfile = file->private_data; 1106 struct tun_file *tfile = file->private_data;
1112 struct tun_struct *tun; 1107 struct tun_struct *tun;
@@ -1128,34 +1123,32 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1128 (unsigned int __user*)argp); 1123 (unsigned int __user*)argp);
1129 } 1124 }
1130 1125
1126 rtnl_lock();
1127
1131 tun = __tun_get(tfile); 1128 tun = __tun_get(tfile);
1132 if (cmd == TUNSETIFF && !tun) { 1129 if (cmd == TUNSETIFF && !tun) {
1133 int err;
1134
1135 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 1130 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1136 1131
1137 rtnl_lock(); 1132 ret = tun_set_iff(tfile->net, file, &ifr);
1138 err = tun_set_iff(tfile->net, file, &ifr);
1139 rtnl_unlock();
1140 1133
1141 if (err) 1134 if (ret)
1142 return err; 1135 goto unlock;
1143 1136
1144 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1137 if (copy_to_user(argp, &ifr, sizeof(ifr)))
1145 return -EFAULT; 1138 ret = -EFAULT;
1146 return 0; 1139 goto unlock;
1147 } 1140 }
1148 1141
1149 1142 ret = -EBADFD;
1150 if (!tun) 1143 if (!tun)
1151 return -EBADFD; 1144 goto unlock;
1152 1145
1153 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); 1146 DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);
1154 1147
1155 ret = 0; 1148 ret = 0;
1156 switch (cmd) { 1149 switch (cmd) {
1157 case TUNGETIFF: 1150 case TUNGETIFF:
1158 ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr); 1151 ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1159 if (ret) 1152 if (ret)
1160 break; 1153 break;
1161 1154
@@ -1201,7 +1194,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1201 1194
1202 case TUNSETLINK: 1195 case TUNSETLINK:
1203 /* Only allow setting the type when the interface is down */ 1196 /* Only allow setting the type when the interface is down */
1204 rtnl_lock();
1205 if (tun->dev->flags & IFF_UP) { 1197 if (tun->dev->flags & IFF_UP) {
1206 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", 1198 DBG(KERN_INFO "%s: Linktype set failed because interface is up\n",
1207 tun->dev->name); 1199 tun->dev->name);
@@ -1211,7 +1203,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1211 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); 1203 DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type);
1212 ret = 0; 1204 ret = 0;
1213 } 1205 }
1214 rtnl_unlock();
1215 break; 1206 break;
1216 1207
1217#ifdef TUN_DEBUG 1208#ifdef TUN_DEBUG
@@ -1220,9 +1211,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1220 break; 1211 break;
1221#endif 1212#endif
1222 case TUNSETOFFLOAD: 1213 case TUNSETOFFLOAD:
1223 rtnl_lock();
1224 ret = set_offload(tun->dev, arg); 1214 ret = set_offload(tun->dev, arg);
1225 rtnl_unlock();
1226 break; 1215 break;
1227 1216
1228 case TUNSETTXFILTER: 1217 case TUNSETTXFILTER:
@@ -1230,9 +1219,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1230 ret = -EINVAL; 1219 ret = -EINVAL;
1231 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1220 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1232 break; 1221 break;
1233 rtnl_lock();
1234 ret = update_filter(&tun->txflt, (void __user *)arg); 1222 ret = update_filter(&tun->txflt, (void __user *)arg);
1235 rtnl_unlock();
1236 break; 1223 break;
1237 1224
1238 case SIOCGIFHWADDR: 1225 case SIOCGIFHWADDR:
@@ -1248,9 +1235,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1248 DBG(KERN_DEBUG "%s: set hw address: %pM\n", 1235 DBG(KERN_DEBUG "%s: set hw address: %pM\n",
1249 tun->dev->name, ifr.ifr_hwaddr.sa_data); 1236 tun->dev->name, ifr.ifr_hwaddr.sa_data);
1250 1237
1251 rtnl_lock();
1252 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1238 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1253 rtnl_unlock();
1254 break; 1239 break;
1255 1240
1256 case TUNGETSNDBUF: 1241 case TUNGETSNDBUF:
@@ -1273,7 +1258,10 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1273 break; 1258 break;
1274 }; 1259 };
1275 1260
1276 tun_put(tun); 1261unlock:
1262 rtnl_unlock();
1263 if (tun)
1264 tun_put(tun);
1277 return ret; 1265 return ret;
1278} 1266}
1279 1267
@@ -1361,7 +1349,7 @@ static const struct file_operations tun_fops = {
1361 .write = do_sync_write, 1349 .write = do_sync_write,
1362 .aio_write = tun_chr_aio_write, 1350 .aio_write = tun_chr_aio_write,
1363 .poll = tun_chr_poll, 1351 .poll = tun_chr_poll,
1364 .ioctl = tun_chr_ioctl, 1352 .unlocked_ioctl = tun_chr_ioctl,
1365 .open = tun_chr_open, 1353 .open = tun_chr_open,
1366 .release = tun_chr_close, 1354 .release = tun_chr_close,
1367 .fasync = tun_chr_fasync 1355 .fasync = tun_chr_fasync
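Note: the tun change above moves the driver from the legacy .ioctl hook to .unlocked_ioctl (long return value, no inode argument, no BKL) and takes rtnl_lock() once around the whole handler instead of per command, releasing it at a common unlock label. A rough sketch of that shape, with hypothetical names:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>

static long demo_unlocked_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	long ret;

	rtnl_lock();				/* one lock around the whole handler */
	switch (cmd) {
	/* ... per-command handling ... */
	default:
		ret = -EINVAL;
		break;
	}
	rtnl_unlock();
	return ret;
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= demo_unlocked_ioctl,	/* replaces the old .ioctl entry */
};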
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 3b957e6412ee..8a7b8c7bd781 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3111,10 +3111,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3111 u8 __iomem *bd; /* BD pointer */ 3111 u8 __iomem *bd; /* BD pointer */
3112 u32 bd_status; 3112 u32 bd_status;
3113 u8 txQ = 0; 3113 u8 txQ = 0;
3114 unsigned long flags;
3114 3115
3115 ugeth_vdbg("%s: IN", __func__); 3116 ugeth_vdbg("%s: IN", __func__);
3116 3117
3117 spin_lock_irq(&ugeth->lock); 3118 spin_lock_irqsave(&ugeth->lock, flags);
3118 3119
3119 dev->stats.tx_bytes += skb->len; 3120 dev->stats.tx_bytes += skb->len;
3120 3121
@@ -3171,7 +3172,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3171 uccf = ugeth->uccf; 3172 uccf = ugeth->uccf;
3172 out_be16(uccf->p_utodr, UCC_FAST_TOD); 3173 out_be16(uccf->p_utodr, UCC_FAST_TOD);
3173#endif 3174#endif
3174 spin_unlock_irq(&ugeth->lock); 3175 spin_unlock_irqrestore(&ugeth->lock, flags);
3175 3176
3176 return 0; 3177 return 0;
3177} 3178}
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index c7467823cd1c..f968c834ff63 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
250 DEFAULT_GPIO_RESET ) 250 DEFAULT_GPIO_RESET )
251PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, 251PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
252 DEFAULT_GPIO_RESET | PEGASUS_II ) 252 DEFAULT_GPIO_RESET | PEGASUS_II )
253PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
254 DEFAULT_GPIO_RESET | PEGASUS_II )
253PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, 255PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
254 DEFAULT_GPIO_RESET) 256 DEFAULT_GPIO_RESET)
255PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002, 257PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 88c30a58b4bd..934f7671650a 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1218,6 +1218,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1218 struct rhine_private *rp = netdev_priv(dev); 1218 struct rhine_private *rp = netdev_priv(dev);
1219 void __iomem *ioaddr = rp->base; 1219 void __iomem *ioaddr = rp->base;
1220 unsigned entry; 1220 unsigned entry;
1221 unsigned long flags;
1221 1222
1222 /* Caution: the write order is important here, set the field 1223 /* Caution: the write order is important here, set the field
1223 with the "ownership" bits last. */ 1224 with the "ownership" bits last. */
@@ -1261,7 +1262,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1261 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1262 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1262 1263
1263 /* lock eth irq */ 1264 /* lock eth irq */
1264 spin_lock_irq(&rp->lock); 1265 spin_lock_irqsave(&rp->lock, flags);
1265 wmb(); 1266 wmb();
1266 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1267 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1267 wmb(); 1268 wmb();
@@ -1280,7 +1281,7 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1280 1281
1281 dev->trans_start = jiffies; 1282 dev->trans_start = jiffies;
1282 1283
1283 spin_unlock_irq(&rp->lock); 1284 spin_unlock_irqrestore(&rp->lock, flags);
1284 1285
1285 if (debug > 4) { 1286 if (debug > 4) {
1286 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", 1287 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 3ba35956327a..cee08a1e497a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1778,7 +1778,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
1778 * mode 1778 * mode
1779 */ 1779 */
1780 if (vptr->rev_id < REV_ID_VT3216_A0) { 1780 if (vptr->rev_id < REV_ID_VT3216_A0) {
1781 if (vptr->mii_status | VELOCITY_DUPLEX_FULL) 1781 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1782 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); 1782 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1783 else 1783 else
1784 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); 1784 BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
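Note: the one-character via-velocity fix above is worth spelling out: with bitwise OR the condition is true for every status value, so the else branch is dead code; only bitwise AND actually tests the flag. Illustrated with a made-up flag bit:

#define DEMO_DUPLEX_FULL	0x04		/* made-up flag value */

static void demo_check(unsigned int status)
{
	if (status | DEMO_DUPLEX_FULL) {
		/* bug: this branch is taken for every value of status */
	}
	if (status & DEMO_DUPLEX_FULL) {
		/* correct: taken only when the full-duplex bit is set */
	}
}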
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2a6e81d5b579..bbedf03a2124 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
70 struct sk_buff_head recv; 70 struct sk_buff_head recv;
71 struct sk_buff_head send; 71 struct sk_buff_head send;
72 72
73 /* Work struct for refilling if we run low on memory. */
74 struct delayed_work refill;
75
73 /* Chain pages by the private ptr. */ 76 /* Chain pages by the private ptr. */
74 struct page *pages; 77 struct page *pages;
75}; 78};
@@ -273,19 +276,22 @@ drop:
273 dev_kfree_skb(skb); 276 dev_kfree_skb(skb);
274} 277}
275 278
276static void try_fill_recv_maxbufs(struct virtnet_info *vi) 279static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
277{ 280{
278 struct sk_buff *skb; 281 struct sk_buff *skb;
279 struct scatterlist sg[2+MAX_SKB_FRAGS]; 282 struct scatterlist sg[2+MAX_SKB_FRAGS];
280 int num, err, i; 283 int num, err, i;
284 bool oom = false;
281 285
282 sg_init_table(sg, 2+MAX_SKB_FRAGS); 286 sg_init_table(sg, 2+MAX_SKB_FRAGS);
283 for (;;) { 287 for (;;) {
284 struct virtio_net_hdr *hdr; 288 struct virtio_net_hdr *hdr;
285 289
286 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); 290 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
287 if (unlikely(!skb)) 291 if (unlikely(!skb)) {
292 oom = true;
288 break; 293 break;
294 }
289 295
290 skb_reserve(skb, NET_IP_ALIGN); 296 skb_reserve(skb, NET_IP_ALIGN);
291 skb_put(skb, MAX_PACKET_LEN); 297 skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
296 if (vi->big_packets) { 302 if (vi->big_packets) {
297 for (i = 0; i < MAX_SKB_FRAGS; i++) { 303 for (i = 0; i < MAX_SKB_FRAGS; i++) {
298 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 304 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
299 f->page = get_a_page(vi, GFP_ATOMIC); 305 f->page = get_a_page(vi, gfp);
300 if (!f->page) 306 if (!f->page)
301 break; 307 break;
302 308
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
325 if (unlikely(vi->num > vi->max)) 331 if (unlikely(vi->num > vi->max))
326 vi->max = vi->num; 332 vi->max = vi->num;
327 vi->rvq->vq_ops->kick(vi->rvq); 333 vi->rvq->vq_ops->kick(vi->rvq);
334 return !oom;
328} 335}
329 336
330static void try_fill_recv(struct virtnet_info *vi) 337/* Returns false if we couldn't fill entirely (OOM). */
338static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
331{ 339{
332 struct sk_buff *skb; 340 struct sk_buff *skb;
333 struct scatterlist sg[1]; 341 struct scatterlist sg[1];
334 int err; 342 int err;
343 bool oom = false;
335 344
336 if (!vi->mergeable_rx_bufs) { 345 if (!vi->mergeable_rx_bufs)
337 try_fill_recv_maxbufs(vi); 346 return try_fill_recv_maxbufs(vi, gfp);
338 return;
339 }
340 347
341 for (;;) { 348 for (;;) {
342 skb_frag_t *f; 349 skb_frag_t *f;
343 350
344 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); 351 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
345 if (unlikely(!skb)) 352 if (unlikely(!skb)) {
353 oom = true;
346 break; 354 break;
355 }
347 356
348 skb_reserve(skb, NET_IP_ALIGN); 357 skb_reserve(skb, NET_IP_ALIGN);
349 358
350 f = &skb_shinfo(skb)->frags[0]; 359 f = &skb_shinfo(skb)->frags[0];
351 f->page = get_a_page(vi, GFP_ATOMIC); 360 f->page = get_a_page(vi, gfp);
352 if (!f->page) { 361 if (!f->page) {
362 oom = true;
353 kfree_skb(skb); 363 kfree_skb(skb);
354 break; 364 break;
355 } 365 }
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
373 if (unlikely(vi->num > vi->max)) 383 if (unlikely(vi->num > vi->max))
374 vi->max = vi->num; 384 vi->max = vi->num;
375 vi->rvq->vq_ops->kick(vi->rvq); 385 vi->rvq->vq_ops->kick(vi->rvq);
386 return !oom;
376} 387}
377 388
378static void skb_recv_done(struct virtqueue *rvq) 389static void skb_recv_done(struct virtqueue *rvq)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
385 } 396 }
386} 397}
387 398
399static void refill_work(struct work_struct *work)
400{
401 struct virtnet_info *vi;
402 bool still_empty;
403
404 vi = container_of(work, struct virtnet_info, refill.work);
405 napi_disable(&vi->napi);
406 try_fill_recv(vi, GFP_KERNEL);
407 still_empty = (vi->num == 0);
408 napi_enable(&vi->napi);
409
410 /* In theory, this can happen: if we don't get any buffers in
411 * we will *never* try to fill again. */
412 if (still_empty)
413 schedule_delayed_work(&vi->refill, HZ/2);
414}
415
388static int virtnet_poll(struct napi_struct *napi, int budget) 416static int virtnet_poll(struct napi_struct *napi, int budget)
389{ 417{
390 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); 418 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
400 received++; 428 received++;
401 } 429 }
402 430
403 /* FIXME: If we oom and completely run out of inbufs, we need 431 if (vi->num < vi->max / 2) {
404 * to start a timer trying to fill more. */ 432 if (!try_fill_recv(vi, GFP_ATOMIC))
405 if (vi->num < vi->max / 2) 433 schedule_delayed_work(&vi->refill, 0);
406 try_fill_recv(vi); 434 }
407 435
408 /* Out of packets? */ 436 /* Out of packets? */
409 if (received < budget) { 437 if (received < budget) {
@@ -893,6 +921,7 @@ static int virtnet_probe(struct virtio_device *vdev)
893 vi->vdev = vdev; 921 vi->vdev = vdev;
894 vdev->priv = vi; 922 vdev->priv = vi;
895 vi->pages = NULL; 923 vi->pages = NULL;
924 INIT_DELAYED_WORK(&vi->refill, refill_work);
896 925
897 /* If they give us a callback when all buffers are done, we don't need 926 /* If they give us a callback when all buffers are done, we don't need
898 * the timer. */ 927 * the timer. */
@@ -941,7 +970,7 @@ static int virtnet_probe(struct virtio_device *vdev)
941 } 970 }
942 971
943 /* Last of all, set up some receive buffers. */ 972 /* Last of all, set up some receive buffers. */
944 try_fill_recv(vi); 973 try_fill_recv(vi, GFP_KERNEL);
945 974
946 /* If we didn't even get one input buffer, we're useless. */ 975 /* If we didn't even get one input buffer, we're useless. */
947 if (vi->num == 0) { 976 if (vi->num == 0) {
@@ -958,6 +987,7 @@ static int virtnet_probe(struct virtio_device *vdev)
958 987
959unregister: 988unregister:
960 unregister_netdev(dev); 989 unregister_netdev(dev);
990 cancel_delayed_work_sync(&vi->refill);
961free_vqs: 991free_vqs:
962 vdev->config->del_vqs(vdev); 992 vdev->config->del_vqs(vdev);
963free: 993free:
@@ -986,6 +1016,7 @@ static void virtnet_remove(struct virtio_device *vdev)
986 BUG_ON(vi->num != 0); 1016 BUG_ON(vi->num != 0);
987 1017
988 unregister_netdev(vi->dev); 1018 unregister_netdev(vi->dev);
1019 cancel_delayed_work_sync(&vi->refill);
989 1020
990 vdev->config->del_vqs(vi->vdev); 1021 vdev->config->del_vqs(vi->vdev);
991 1022
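Note: the virtio_net change above replaces the old FIXME with a real recovery path: when the GFP_ATOMIC refill in the NAPI poll loop cannot allocate buffers, a delayed work item retries with GFP_KERNEL and reschedules itself until the ring is no longer empty, and the work is cancelled with cancel_delayed_work_sync() on remove. A reduced sketch of the pattern, with hypothetical helpers:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/gfp.h>
#include <linux/jiffies.h>

struct demo_dev {				/* hypothetical device state */
	struct delayed_work refill;
};

static bool demo_try_fill(struct demo_dev *d, gfp_t gfp);	/* hypothetical allocator */

static void demo_refill_work(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, refill.work);

	if (!demo_try_fill(d, GFP_KERNEL))	/* can sleep here, unlike in NAPI context */
		schedule_delayed_work(&d->refill, HZ / 2);
}

static void demo_napi_refill(struct demo_dev *d)
{
	if (!demo_try_fill(d, GFP_ATOMIC))	/* atomic attempt failed: defer to the work item */
		schedule_delayed_work(&d->refill, 0);
}

/* setup:    INIT_DELAYED_WORK(&d->refill, demo_refill_work);
 * teardown: cancel_delayed_work_sync(&d->refill);
 */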
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 9d38cf60a0db..88c3d8573869 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1967,13 +1967,14 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
1967 int ret; 1967 int ret;
1968 1968
1969 mutex_lock(&ar->mutex); 1969 mutex_lock(&ar->mutex);
1970 if ((param) && !(queue > __AR9170_NUM_TXQ)) { 1970 if (queue < __AR9170_NUM_TXQ) {
1971 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]], 1971 memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
1972 param, sizeof(*param)); 1972 param, sizeof(*param));
1973 1973
1974 ret = ar9170_set_qos(ar); 1974 ret = ar9170_set_qos(ar);
1975 } else 1975 } else {
1976 ret = -EINVAL; 1976 ret = -EINVAL;
1977 }
1977 1978
1978 mutex_unlock(&ar->mutex); 1979 mutex_unlock(&ar->mutex);
1979 return ret; 1980 return ret;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 754b1f8d8da9..007eb85fc67e 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -598,11 +598,15 @@ static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
598 598
599 err = request_firmware(&aru->init_values, "ar9170-1.fw", 599 err = request_firmware(&aru->init_values, "ar9170-1.fw",
600 &aru->udev->dev); 600 &aru->udev->dev);
601 if (err) {
602 dev_err(&aru->udev->dev, "file with init values not found.\n");
603 return err;
604 }
601 605
602 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev); 606 err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
603 if (err) { 607 if (err) {
604 release_firmware(aru->init_values); 608 release_firmware(aru->init_values);
605 dev_err(&aru->udev->dev, "file with init values not found.\n"); 609 dev_err(&aru->udev->dev, "firmware file not found.\n");
606 return err; 610 return err;
607 } 611 }
608 612
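Note: the ar9170 USB fix above stops ignoring a failed request for the first firmware image and gives each failure its own message; previously a missing ar9170-1.fw was only reported indirectly. The two-stage load with proper unwinding, sketched with placeholder file names:

#include <linux/firmware.h>
#include <linux/device.h>

static int demo_load_firmware(struct device *dev,
			      const struct firmware **init_fw,
			      const struct firmware **main_fw)
{
	int err;

	err = request_firmware(init_fw, "demo-1.fw", dev);
	if (err) {
		dev_err(dev, "file with init values not found\n");
		return err;
	}

	err = request_firmware(main_fw, "demo-2.fw", dev);
	if (err) {
		release_firmware(*init_fw);	/* undo the first request on failure */
		dev_err(dev, "firmware file not found\n");
		return err;
	}
	return 0;
}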
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 44c29b3f6728..6dcac73b4d29 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -6226,7 +6226,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
6226 }; 6226 };
6227 6227
6228 u8 channel; 6228 u8 channel;
6229 while (channel_index < IPW_SCAN_CHANNELS) { 6229 while (channel_index < IPW_SCAN_CHANNELS - 1) {
6230 channel = 6230 channel =
6231 priv->speed_scan[priv->speed_scan_pos]; 6231 priv->speed_scan[priv->speed_scan_pos];
6232 if (channel == 0) { 6232 if (channel == 0) {
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index d6997371c27e..b9b374119033 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,7 +1,6 @@
1/* Copyright (C) 2006, Red Hat, Inc. */ 1/* Copyright (C) 2006, Red Hat, Inc. */
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/kernel.h>
5#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
6#include <linux/ieee80211.h> 5#include <linux/ieee80211.h>
7#include <linux/if_arp.h> 6#include <linux/if_arp.h>
@@ -44,21 +43,21 @@ static int get_common_rates(struct lbs_private *priv,
44 u16 *rates_size) 43 u16 *rates_size)
45{ 44{
46 u8 *card_rates = lbs_bg_rates; 45 u8 *card_rates = lbs_bg_rates;
46 size_t num_card_rates = sizeof(lbs_bg_rates);
47 int ret = 0, i, j; 47 int ret = 0, i, j;
48 u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)]; 48 u8 tmp[30];
49 size_t tmp_size = 0; 49 size_t tmp_size = 0;
50 50
51 /* For each rate in card_rates that exists in rate1, copy to tmp */ 51 /* For each rate in card_rates that exists in rate1, copy to tmp */
52 for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) { 52 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
53 for (j = 0; j < *rates_size && rates[j]; j++) { 53 for (j = 0; rates[j] && (j < *rates_size); j++) {
54 if (rates[j] == card_rates[i]) 54 if (rates[j] == card_rates[i])
55 tmp[tmp_size++] = card_rates[i]; 55 tmp[tmp_size++] = card_rates[i];
56 } 56 }
57 } 57 }
58 58
59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size); 59 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, 60 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
61 ARRAY_SIZE(lbs_bg_rates));
62 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size); 61 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
63 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate); 62 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
64 63
@@ -70,7 +69,10 @@ static int get_common_rates(struct lbs_private *priv,
70 lbs_pr_alert("Previously set fixed data rate %#x isn't " 69 lbs_pr_alert("Previously set fixed data rate %#x isn't "
71 "compatible with the network.\n", priv->cur_rate); 70 "compatible with the network.\n", priv->cur_rate);
72 ret = -1; 71 ret = -1;
72 goto done;
73 } 73 }
74 ret = 0;
75
74done: 76done:
75 memset(rates, 0, *rates_size); 77 memset(rates, 0, *rates_size);
76 *rates_size = min_t(int, tmp_size, *rates_size); 78 *rates_size = min_t(int, tmp_size, *rates_size);
@@ -320,7 +322,7 @@ static int lbs_associate(struct lbs_private *priv,
320 rates = (struct mrvl_ie_rates_param_set *) pos; 322 rates = (struct mrvl_ie_rates_param_set *) pos;
321 rates->header.type = cpu_to_le16(TLV_TYPE_RATES); 323 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
322 memcpy(&rates->rates, &bss->rates, MAX_RATES); 324 memcpy(&rates->rates, &bss->rates, MAX_RATES);
323 tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES); 325 tmplen = MAX_RATES;
324 if (get_common_rates(priv, rates->rates, &tmplen)) { 326 if (get_common_rates(priv, rates->rates, &tmplen)) {
325 ret = -1; 327 ret = -1;
326 goto done; 328 goto done;
@@ -596,7 +598,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
596 598
597 /* Copy Data rates from the rates recorded in scan response */ 599 /* Copy Data rates from the rates recorded in scan response */
598 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates)); 600 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
599 ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES); 601 ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
600 memcpy(cmd.bss.rates, bss->rates, ratesize); 602 memcpy(cmd.bss.rates, bss->rates, ratesize);
601 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) { 603 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
602 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n"); 604 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 0a2e29140add..c8a1998d4744 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -56,8 +56,8 @@ struct rxpd {
56 u8 bss_type; 56 u8 bss_type;
57 /* BSS number */ 57 /* BSS number */
58 u8 bss_num; 58 u8 bss_num;
59 } bss; 59 } __attribute__ ((packed)) bss;
60 } u; 60 } __attribute__ ((packed)) u;
61 61
62 /* SNR */ 62 /* SNR */
63 u8 snr; 63 u8 snr;
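Note: the hostcmd.h hunk above adds __attribute__ ((packed)) to the struct and union embedded in rxpd: packing the outer structure is not guaranteed to change the internal layout of nested aggregate types, so every level that is part of the wire format needs its own annotation. A small illustration (the layout is invented, not the real rxpd):

#include <linux/types.h>

struct demo_hdr {
	u8	bss_type;
	u8	bss_num;
} __attribute__ ((packed));

struct demo_frame {
	__le16	len;
	union {
		struct demo_hdr bss;
		u16 raw;
	} __attribute__ ((packed)) u;		/* the nested union is packed explicitly */
	u8	snr;
} __attribute__ ((packed));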
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index a263d5c84c08..83967afe0821 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -261,7 +261,7 @@ struct mwl8k_vif {
261 */ 261 */
262}; 262};
263 263
264#define MWL8K_VIF(_vif) (struct mwl8k_vif *)(&((_vif)->drv_priv)) 264#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
265 265
266static const struct ieee80211_channel mwl8k_channels[] = { 266static const struct ieee80211_channel mwl8k_channels[] = {
267 { .center_freq = 2412, .hw_value = 1, }, 267 { .center_freq = 2412, .hw_value = 1, },
@@ -1012,6 +1012,8 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
1012 rmb(); 1012 rmb();
1013 1013
1014 skb = rxq->rx_skb[rxq->rx_head]; 1014 skb = rxq->rx_skb[rxq->rx_head];
1015 if (skb == NULL)
1016 break;
1015 rxq->rx_skb[rxq->rx_head] = NULL; 1017 rxq->rx_skb[rxq->rx_head] = NULL;
1016 1018
1017 rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; 1019 rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
@@ -1591,6 +1593,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1591 timeout = wait_for_completion_timeout(&cmd_wait, 1593 timeout = wait_for_completion_timeout(&cmd_wait,
1592 msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); 1594 msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
1593 1595
1596 pci_unmap_single(priv->pdev, dma_addr, dma_size,
1597 PCI_DMA_BIDIRECTIONAL);
1598
1594 result = &cmd->result; 1599 result = &cmd->result;
1595 if (!timeout) { 1600 if (!timeout) {
1596 spin_lock_irq(&priv->fw_lock); 1601 spin_lock_irq(&priv->fw_lock);
@@ -1610,8 +1615,6 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1610 *result); 1615 *result);
1611 } 1616 }
1612 1617
1613 pci_unmap_single(priv->pdev, dma_addr, dma_size,
1614 PCI_DMA_BIDIRECTIONAL);
1615 return rc; 1618 return rc;
1616} 1619}
1617 1620
@@ -1654,18 +1657,18 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
1654 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); 1657 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1655 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 1658 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1656 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); 1659 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
1657 cmd->num_tx_queues = MWL8K_TX_QUEUES; 1660 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1658 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1661 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1659 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); 1662 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
1660 cmd->num_tx_desc_per_queue = MWL8K_TX_DESCS; 1663 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1661 cmd->total_rx_desc = MWL8K_RX_DESCS; 1664 cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
1662 1665
1663 rc = mwl8k_post_cmd(hw, &cmd->header); 1666 rc = mwl8k_post_cmd(hw, &cmd->header);
1664 1667
1665 if (!rc) { 1668 if (!rc) {
1666 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); 1669 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
1667 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1670 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1668 priv->fw_rev = cmd->fw_rev; 1671 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1669 priv->hw_rev = cmd->hw_rev; 1672 priv->hw_rev = cmd->hw_rev;
1670 priv->region_code = le16_to_cpu(cmd->region_code); 1673 priv->region_code = le16_to_cpu(cmd->region_code);
1671 } 1674 }
@@ -3216,15 +3219,19 @@ static int mwl8k_configure_filter_wt(struct work_struct *wt)
3216 struct dev_addr_list *mclist = worker->mclist; 3219 struct dev_addr_list *mclist = worker->mclist;
3217 3220
3218 struct mwl8k_priv *priv = hw->priv; 3221 struct mwl8k_priv *priv = hw->priv;
3219 struct mwl8k_vif *mv_vif;
3220 int rc = 0; 3222 int rc = 0;
3221 3223
3222 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 3224 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
3223 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 3225 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
3224 rc = mwl8k_cmd_set_pre_scan(hw); 3226 rc = mwl8k_cmd_set_pre_scan(hw);
3225 else { 3227 else {
3226 mv_vif = MWL8K_VIF(priv->vif); 3228 u8 *bssid;
3227 rc = mwl8k_cmd_set_post_scan(hw, mv_vif->bssid); 3229
3230 bssid = "\x00\x00\x00\x00\x00\x00";
3231 if (priv->vif != NULL)
3232 bssid = MWL8K_VIF(priv->vif)->bssid;
3233
3234 rc = mwl8k_cmd_set_post_scan(hw, bssid);
3228 } 3235 }
3229 } 3236 }
3230 3237
@@ -3726,6 +3733,8 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3726 3733
3727 ieee80211_stop_queues(hw); 3734 ieee80211_stop_queues(hw);
3728 3735
3736 ieee80211_unregister_hw(hw);
3737
3729 /* Remove tx reclaim tasklet */ 3738 /* Remove tx reclaim tasklet */
3730 tasklet_kill(&priv->tx_reclaim_task); 3739 tasklet_kill(&priv->tx_reclaim_task);
3731 3740
@@ -3739,8 +3748,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3739 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3748 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3740 mwl8k_txq_reclaim(hw, i, 1); 3749 mwl8k_txq_reclaim(hw, i, 1);
3741 3750
3742 ieee80211_unregister_hw(hw);
3743
3744 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3751 for (i = 0; i < MWL8K_TX_QUEUES; i++)
3745 mwl8k_txq_deinit(hw, i); 3752 mwl8k_txq_deinit(hw, i);
3746 3753
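Note: several of the mwl8k fixes above are endianness related: fields in the firmware command block are little-endian on the wire, so values must pass through cpu_to_le32() when written and le32_to_cpu()/le16_to_cpu() when read back, otherwise the driver only behaves on little-endian hosts. The remaining hunks unmap the command DMA buffer before the result is read and unregister the ieee80211 device before tearing down the queues it may still be using. A tiny endianness sketch with an invented command layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_cmd {				/* invented layout, little-endian on the wire */
	__le32	num_tx_queues;
	__le32	fw_rev;
	__le16	region_code;
};

static void demo_fill_and_parse(struct demo_cmd *cmd, u32 *fw_rev, u16 *region)
{
	cmd->num_tx_queues = cpu_to_le32(4);	/* host value -> wire byte order */
	*fw_rev = le32_to_cpu(cmd->fw_rev);	/* wire byte order -> host value */
	*region = le16_to_cpu(cmd->region_code);
}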
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 632fac86a308..b3946272c72e 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -70,7 +70,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
70 int err = 0; 70 int err = 0;
71 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE]; 71 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
72 72
73 if ((key < 0) || (key > 4)) 73 if ((key < 0) || (key >= 4))
74 return -EINVAL; 74 return -EINVAL;
75 75
76 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, 76 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index a498dde024e1..49c9e2c1433d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -849,13 +849,15 @@ struct rt2x00_dev {
849static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev, 849static inline void rt2x00_rf_read(struct rt2x00_dev *rt2x00dev,
850 const unsigned int word, u32 *data) 850 const unsigned int word, u32 *data)
851{ 851{
852 *data = rt2x00dev->rf[word]; 852 BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
853 *data = rt2x00dev->rf[word - 1];
853} 854}
854 855
855static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev, 856static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
856 const unsigned int word, u32 data) 857 const unsigned int word, u32 data)
857{ 858{
858 rt2x00dev->rf[word] = data; 859 BUG_ON(word < 1 || word > rt2x00dev->ops->rf_size / sizeof(u32));
860 rt2x00dev->rf[word - 1] = data;
859} 861}
860 862
861/* 863/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 294250e294dd..87a95588a8e3 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -869,6 +869,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
869 priv->aifsn[3] = 3; /* AIFSN[AC_BE] */ 869 priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
870 rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); 870 rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
871 871
872 /* ENEDCA flag must always be set, transmit issues? */
873 rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
874
872 return 0; 875 return 0;
873} 876}
874 877
@@ -1173,13 +1176,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1173 rtl818x_iowrite8(priv, &priv->map->BSSID[i], 1176 rtl818x_iowrite8(priv, &priv->map->BSSID[i],
1174 info->bssid[i]); 1177 info->bssid[i]);
1175 1178
1179 if (priv->is_rtl8187b)
1180 reg = RTL818X_MSR_ENEDCA;
1181 else
1182 reg = 0;
1183
1176 if (is_valid_ether_addr(info->bssid)) { 1184 if (is_valid_ether_addr(info->bssid)) {
1177 reg = RTL818X_MSR_INFRA; 1185 reg |= RTL818X_MSR_INFRA;
1178 if (priv->is_rtl8187b)
1179 reg |= RTL818X_MSR_ENEDCA;
1180 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1186 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
1181 } else { 1187 } else {
1182 reg = RTL818X_MSR_NO_LINK; 1188 reg |= RTL818X_MSR_NO_LINK;
1183 rtl818x_iowrite8(priv, &priv->map->MSR, reg); 1189 rtl818x_iowrite8(priv, &priv->map->MSR, reg);
1184 } 1190 }
1185 1191
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index a07580138e81..c2fd6187773f 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
346static int yellowfin_open(struct net_device *dev); 346static int yellowfin_open(struct net_device *dev);
347static void yellowfin_timer(unsigned long data); 347static void yellowfin_timer(unsigned long data);
348static void yellowfin_tx_timeout(struct net_device *dev); 348static void yellowfin_tx_timeout(struct net_device *dev);
349static void yellowfin_init_ring(struct net_device *dev); 349static int yellowfin_init_ring(struct net_device *dev);
350static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); 350static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); 351static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
352static int yellowfin_rx(struct net_device *dev); 352static int yellowfin_rx(struct net_device *dev);
@@ -573,19 +573,24 @@ static int yellowfin_open(struct net_device *dev)
573{ 573{
574 struct yellowfin_private *yp = netdev_priv(dev); 574 struct yellowfin_private *yp = netdev_priv(dev);
575 void __iomem *ioaddr = yp->base; 575 void __iomem *ioaddr = yp->base;
576 int i; 576 int i, ret;
577 577
578 /* Reset the chip. */ 578 /* Reset the chip. */
579 iowrite32(0x80000000, ioaddr + DMACtrl); 579 iowrite32(0x80000000, ioaddr + DMACtrl);
580 580
581 i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); 581 ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
582 if (i) return i; 582 if (ret)
583 return ret;
583 584
584 if (yellowfin_debug > 1) 585 if (yellowfin_debug > 1)
585 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n", 586 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
586 dev->name, dev->irq); 587 dev->name, dev->irq);
587 588
588 yellowfin_init_ring(dev); 589 ret = yellowfin_init_ring(dev);
590 if (ret) {
591 free_irq(dev->irq, dev);
592 return ret;
593 }
589 594
590 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); 595 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
591 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); 596 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -725,10 +730,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
725} 730}
726 731
727/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 732/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
728static void yellowfin_init_ring(struct net_device *dev) 733static int yellowfin_init_ring(struct net_device *dev)
729{ 734{
730 struct yellowfin_private *yp = netdev_priv(dev); 735 struct yellowfin_private *yp = netdev_priv(dev);
731 int i; 736 int i, j;
732 737
733 yp->tx_full = 0; 738 yp->tx_full = 0;
734 yp->cur_rx = yp->cur_tx = 0; 739 yp->cur_rx = yp->cur_tx = 0;
@@ -753,6 +758,11 @@ static void yellowfin_init_ring(struct net_device *dev)
753 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, 758 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
754 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 759 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
755 } 760 }
761 if (i != RX_RING_SIZE) {
762 for (j = 0; j < i; j++)
763 dev_kfree_skb(yp->rx_skbuff[j]);
764 return -ENOMEM;
765 }
756 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); 766 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
757 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 767 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
758 768
@@ -769,8 +779,6 @@ static void yellowfin_init_ring(struct net_device *dev)
769 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); 779 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
770#else 780#else
771{ 781{
772 int j;
773
774 /* Tx ring needs a pair of descriptors, the second for the status. */ 782 /* Tx ring needs a pair of descriptors, the second for the status. */
775 for (i = 0; i < TX_RING_SIZE; i++) { 783 for (i = 0; i < TX_RING_SIZE; i++) {
776 j = 2*i; 784 j = 2*i;
@@ -805,7 +813,7 @@ static void yellowfin_init_ring(struct net_device *dev)
805} 813}
806#endif 814#endif
807 yp->tx_tail_desc = &yp->tx_status[0]; 815 yp->tx_tail_desc = &yp->tx_status[0];
808 return; 816 return 0;
809} 817}
810 818
811static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) 819static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
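Note: the yellowfin change above makes ring initialisation report allocation failures: yellowfin_init_ring() now returns -ENOMEM after freeing the RX skbs it did manage to allocate, and yellowfin_open() frees the IRQ and propagates the error instead of continuing with a half-built ring. The unwind pattern in isolation, with made-up sizes:

#include <linux/skbuff.h>
#include <linux/errno.h>

#define DEMO_RX_RING_SIZE	64		/* made-up ring size */
#define DEMO_RX_BUF_SZ		1536		/* made-up buffer size */

static int demo_init_rx_ring(struct sk_buff **ring)
{
	int i, j;

	for (i = 0; i < DEMO_RX_RING_SIZE; i++) {
		ring[i] = dev_alloc_skb(DEMO_RX_BUF_SZ);
		if (!ring[i])
			break;
	}
	if (i != DEMO_RX_RING_SIZE) {
		for (j = 0; j < i; j++)		/* free what was already allocated */
			dev_kfree_skb(ring[j]);
		return -ENOMEM;
	}
	return 0;
}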
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 37c84e3b8be0..81c753a617ab 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -120,6 +120,9 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
120 for (i = ARRAY_SIZE(cards)-1; i >= 0; i--) 120 for (i = ARRAY_SIZE(cards)-1; i >= 0; i--)
121 if (z->id == cards[i].id) 121 if (z->id == cards[i].id)
122 break; 122 break;
123 if (i < 0)
124 return -ENODEV;
125
123 board = z->resource.start; 126 board = z->resource.start;
124 ioaddr = board+cards[i].offset; 127 ioaddr = board+cards[i].offset;
125 dev = alloc_ei_netdev(); 128 dev = alloc_ei_netdev();
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e3a87210e947..e03fe98f0619 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno,
598} 598}
599 599
600/** 600/**
601 * pci_sriov_resource_alignment - get resource alignment for VF BAR
602 * @dev: the PCI device
603 * @resno: the resource number
604 *
605 * Returns the alignment of the VF BAR found in the SR-IOV capability.
606 * This is not the same as the resource size which is defined as
607 * the VF BAR size multiplied by the number of VFs. The alignment
608 * is just the VF BAR size.
609 */
610int pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
611{
612 struct resource tmp;
613 enum pci_bar_type type;
614 int reg = pci_iov_resource_bar(dev, resno, &type);
615
616 if (!reg)
617 return 0;
618
619 __pci_read_base(dev, type, &tmp, reg);
620 return resource_alignment(&tmp);
621}
622
623/**
601 * pci_restore_iov_state - restore the state of the IOV capability 624 * pci_restore_iov_state - restore the state of the IOV capability
602 * @dev: the PCI device 625 * @dev: the PCI device
603 */ 626 */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d76c4c85367e..f99bc7f089f1 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -508,7 +508,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
508 return error; 508 return error;
509 } 509 }
510 510
511 return pci_dev->state_saved ? pci_restore_state(pci_dev) : 0; 511 return pci_restore_state(pci_dev);
512} 512}
513 513
514static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev) 514static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index dbd0f947f497..7b70312181d7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -846,6 +846,8 @@ pci_restore_state(struct pci_dev *dev)
846 int i; 846 int i;
847 u32 val; 847 u32 val;
848 848
849 if (!dev->state_saved)
850 return 0;
849 /* PCI Express register must be restored first */ 851 /* PCI Express register must be restored first */
850 pci_restore_pcie_state(dev); 852 pci_restore_pcie_state(dev);
851 853
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f73bcbedf37c..5ff4d25bf0e9 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev);
243extern void pci_iov_release(struct pci_dev *dev); 243extern void pci_iov_release(struct pci_dev *dev);
244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, 244extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
245 enum pci_bar_type *type); 245 enum pci_bar_type *type);
246extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
246extern void pci_restore_iov_state(struct pci_dev *dev); 247extern void pci_restore_iov_state(struct pci_dev *dev);
247extern int pci_iov_bus_range(struct pci_bus *bus); 248extern int pci_iov_bus_range(struct pci_bus *bus);
248 249
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev)
298} 299}
299#endif /* CONFIG_PCI_IOV */ 300#endif /* CONFIG_PCI_IOV */
300 301
302static inline int pci_resource_alignment(struct pci_dev *dev,
303 struct resource *res)
304{
305#ifdef CONFIG_PCI_IOV
306 int resno = res - dev->resource;
307
308 if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
309 return pci_sriov_resource_alignment(dev, resno);
310#endif
311 return resource_alignment(res);
312}
313
301#endif /* DRIVERS_PCI_H */ 314#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index b636e245445d..7c443b4583ab 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -25,7 +25,7 @@
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/cache.h> 26#include <linux/cache.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28#include "pci.h"
29 29
30static void pbus_assign_resources_sorted(const struct pci_bus *bus) 30static void pbus_assign_resources_sorted(const struct pci_bus *bus)
31{ 31{
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
384 continue; 384 continue;
385 r_size = resource_size(r); 385 r_size = resource_size(r);
386 /* For bridges size != alignment */ 386 /* For bridges size != alignment */
387 align = resource_alignment(r); 387 align = pci_resource_alignment(dev, r);
388 order = __ffs(align) - 20; 388 order = __ffs(align) - 20;
389 if (order > 11) { 389 if (order > 11) {
390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: " 390 dev_warn(&dev->dev, "BAR %d bad alignment %llx: "
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 1898c7b47907..88cdd1a937d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
144 144
145 size = resource_size(res); 145 size = resource_size(res);
146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; 146 min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
147 align = resource_alignment(res); 147 align = pci_resource_alignment(dev, res);
148 148
149 /* First, try exact prefetching match.. */ 149 /* First, try exact prefetching match.. */
150 ret = pci_bus_alloc_resource(bus, res, size, align, min, 150 ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
178 struct pci_bus *bus; 178 struct pci_bus *bus;
179 int ret; 179 int ret;
180 180
181 align = resource_alignment(res); 181 align = pci_resource_alignment(dev, res);
182 if (!align) { 182 if (!align) {
183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " 183 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
184 "alignment) %pR flags %#lx\n", 184 "alignment) %pR flags %#lx\n",
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
259 if (!(r->flags) || r->parent) 259 if (!(r->flags) || r->parent)
260 continue; 260 continue;
261 261
262 r_align = resource_alignment(r); 262 r_align = pci_resource_alignment(dev, r);
263 if (!r_align) { 263 if (!r_align) {
264 dev_warn(&dev->dev, "BAR %d: bogus alignment " 264 dev_warn(&dev->dev, "BAR %d: bogus alignment "
265 "%pR flags %#lx\n", 265 "%pR flags %#lx\n",
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
271 struct resource_list *ln = list->next; 271 struct resource_list *ln = list->next;
272 272
273 if (ln) 273 if (ln)
274 align = resource_alignment(ln->res); 274 align = pci_resource_alignment(ln->dev, ln->res);
275 275
276 if (r_align > align) { 276 if (r_align > align) {
277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); 277 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 81d31ea507d1..51c0a8bee414 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -335,6 +335,7 @@ static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
335 if (hci_result != HCI_SUCCESS) { 335 if (hci_result != HCI_SUCCESS) {
336 /* Can't do anything useful */ 336 /* Can't do anything useful */
337 mutex_unlock(&dev->mutex); 337 mutex_unlock(&dev->mutex);
338 return;
338 } 339 }
339 340
340 new_rfk_state = value; 341 new_rfk_state = value;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 043b208d971d..f215a5919192 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -270,7 +270,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
270 acpi_status status; 270 acpi_status status;
271 struct acpi_object_list input; 271 struct acpi_object_list input;
272 union acpi_object params[3]; 272 union acpi_object params[3];
273 char method[4] = "WM"; 273 char method[5] = "WM";
274 274
275 if (!find_guid(guid_string, &wblock)) 275 if (!find_guid(guid_string, &wblock))
276 return AE_ERROR; 276 return AE_ERROR;
@@ -328,8 +328,8 @@ struct acpi_buffer *out)
328 acpi_status status, wc_status = AE_ERROR; 328 acpi_status status, wc_status = AE_ERROR;
329 struct acpi_object_list input, wc_input; 329 struct acpi_object_list input, wc_input;
330 union acpi_object wc_params[1], wq_params[1]; 330 union acpi_object wc_params[1], wq_params[1];
331 char method[4]; 331 char method[5];
332 char wc_method[4] = "WC"; 332 char wc_method[5] = "WC";
333 333
334 if (!guid_string || !out) 334 if (!guid_string || !out)
335 return AE_BAD_PARAMETER; 335 return AE_BAD_PARAMETER;
@@ -410,7 +410,7 @@ const struct acpi_buffer *in)
410 acpi_handle handle; 410 acpi_handle handle;
411 struct acpi_object_list input; 411 struct acpi_object_list input;
412 union acpi_object params[2]; 412 union acpi_object params[2];
413 char method[4] = "WS"; 413 char method[5] = "WS";
414 414
415 if (!guid_string || !in) 415 if (!guid_string || !in)
416 return AE_BAD_DATA; 416 return AE_BAD_DATA;
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ac8cc8cea1e3..fea17e7805e9 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -244,7 +244,7 @@ int pps_register_cdev(struct pps_device *pps)
244 } 244 }
245 pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL, 245 pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
246 "pps%d", pps->id); 246 "pps%d", pps->id);
247 if (err) 247 if (IS_ERR(pps->dev))
248 goto del_cdev; 248 goto del_cdev;
249 dev_set_drvdata(pps->dev, pps); 249 dev_set_drvdata(pps->dev, pps);
250 250
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 749836668655..3f62dd50bbbe 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2135,9 +2135,9 @@ static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2135 struct dasd_device *base; 2135 struct dasd_device *base;
2136 2136
2137 block = bdev->bd_disk->private_data; 2137 block = bdev->bd_disk->private_data;
2138 base = block->base;
2139 if (!block) 2138 if (!block)
2140 return -ENODEV; 2139 return -ENODEV;
2140 base = block->base;
2141 2141
2142 if (!base->discipline || 2142 if (!base->discipline ||
2143 !base->discipline->fill_geometry) 2143 !base->discipline->fill_geometry)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 3c57c1a18bb8..d593bc76afe3 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -772,10 +772,8 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
772 cdev = io_subchannel_allocate_dev(sch); 772 cdev = io_subchannel_allocate_dev(sch);
773 if (!IS_ERR(cdev)) { 773 if (!IS_ERR(cdev)) {
774 ret = io_subchannel_initialize_dev(sch, cdev); 774 ret = io_subchannel_initialize_dev(sch, cdev);
775 if (ret) { 775 if (ret)
776 kfree(cdev);
777 cdev = ERR_PTR(ret); 776 cdev = ERR_PTR(ret);
778 }
779 } 777 }
780 return cdev; 778 return cdev;
781} 779}
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 15dab96d05e3..7c815d3327f7 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -537,8 +537,12 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
537 } 537 }
538 if (temp_index != 0 && fan_index != 0) { 538 if (temp_index != 0 && fan_index != 0) {
539 kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); 539 kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld");
540 if (IS_ERR(kenvctrld_task)) 540 if (IS_ERR(kenvctrld_task)) {
541 return PTR_ERR(kenvctrld_task); 541 int err = PTR_ERR(kenvctrld_task);
542
543 kenvctrld_task = NULL;
544 return err;
545 }
542 } 546 }
543 547
544 return 0; 548 return 0;
@@ -561,7 +565,8 @@ void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
561 struct bbc_cpu_temperature *tp, *tpos; 565 struct bbc_cpu_temperature *tp, *tpos;
562 struct bbc_fan_control *fp, *fpos; 566 struct bbc_fan_control *fp, *fpos;
563 567
564 kthread_stop(kenvctrld_task); 568 if (kenvctrld_task)
569 kthread_stop(kenvctrld_task);
565 570
566 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) { 571 list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
567 list_del(&tp->bp_list); 572 list_del(&tp->bp_list);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index f3da592f7bcc..35a13867495e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -119,6 +119,64 @@ _base_fault_reset_work(struct work_struct *work)
119 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 119 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
120} 120}
121 121
122/**
123 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
124 * @ioc: pointer to scsi command object
125 * Context: sleep.
126 *
127 * Return nothing.
128 */
129void
130mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
131{
132 unsigned long flags;
133
134 if (ioc->fault_reset_work_q)
135 return;
136
137 /* initialize fault polling */
138 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
139 snprintf(ioc->fault_reset_work_q_name,
140 sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
141 ioc->fault_reset_work_q =
142 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
143 if (!ioc->fault_reset_work_q) {
144 printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
145 ioc->name, __func__, __LINE__);
146 return;
147 }
148 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
149 if (ioc->fault_reset_work_q)
150 queue_delayed_work(ioc->fault_reset_work_q,
151 &ioc->fault_reset_work,
152 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
153 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
154}
155
156/**
157 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
158 * @ioc: pointer to scsi command object
159 * Context: sleep.
160 *
161 * Return nothing.
162 */
163void
164mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
165{
166 unsigned long flags;
167 struct workqueue_struct *wq;
168
169 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
170 wq = ioc->fault_reset_work_q;
171 ioc->fault_reset_work_q = NULL;
172 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
173 if (wq) {
174 if (!cancel_delayed_work(&ioc->fault_reset_work))
175 flush_workqueue(wq);
176 destroy_workqueue(wq);
177 }
178}
179
122#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 180#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
123/** 181/**
124 * _base_sas_ioc_info - verbose translation of the ioc status 182 * _base_sas_ioc_info - verbose translation of the ioc status
@@ -440,6 +498,10 @@ _base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
440 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 498 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
441 return; 499 return;
442 500
501 /* each nexus loss loginfo */
502 if (log_info == 0x31170000)
503 return;
504
443 /* eat the loginfos associated with task aborts */ 505 /* eat the loginfos associated with task aborts */
444 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info == 506 if (ioc->ignore_loginfos && (log_info == 30050000 || log_info ==
445 0x31140000 || log_info == 0x31130000)) 507 0x31140000 || log_info == 0x31130000))
@@ -1109,7 +1171,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1109 } 1171 }
1110 } 1172 }
1111 1173
1112 pci_set_drvdata(pdev, ioc->shost);
1113 _base_mask_interrupts(ioc); 1174 _base_mask_interrupts(ioc);
1114 r = _base_enable_msix(ioc); 1175 r = _base_enable_msix(ioc);
1115 if (r) 1176 if (r)
@@ -1132,7 +1193,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1132 ioc->pci_irq = -1; 1193 ioc->pci_irq = -1;
1133 pci_release_selected_regions(ioc->pdev, ioc->bars); 1194 pci_release_selected_regions(ioc->pdev, ioc->bars);
1134 pci_disable_device(pdev); 1195 pci_disable_device(pdev);
1135 pci_set_drvdata(pdev, NULL);
1136 return r; 1196 return r;
1137} 1197}
1138 1198
@@ -3191,7 +3251,6 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3191 ioc->chip_phys = 0; 3251 ioc->chip_phys = 0;
3192 pci_release_selected_regions(ioc->pdev, ioc->bars); 3252 pci_release_selected_regions(ioc->pdev, ioc->bars);
3193 pci_disable_device(pdev); 3253 pci_disable_device(pdev);
3194 pci_set_drvdata(pdev, NULL);
3195 return; 3254 return;
3196} 3255}
3197 3256
@@ -3205,7 +3264,6 @@ int
3205mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) 3264mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3206{ 3265{
3207 int r, i; 3266 int r, i;
3208 unsigned long flags;
3209 3267
3210 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3268 dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3211 __func__)); 3269 __func__));
@@ -3214,6 +3272,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3214 if (r) 3272 if (r)
3215 return r; 3273 return r;
3216 3274
3275 pci_set_drvdata(ioc->pdev, ioc->shost);
3217 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 3276 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3218 if (r) 3277 if (r)
3219 goto out_free_resources; 3278 goto out_free_resources;
@@ -3288,23 +3347,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3288 if (r) 3347 if (r)
3289 goto out_free_resources; 3348 goto out_free_resources;
3290 3349
3291 /* initialize fault polling */ 3350 mpt2sas_base_start_watchdog(ioc);
3292 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
3293 snprintf(ioc->fault_reset_work_q_name,
3294 sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
3295 ioc->fault_reset_work_q =
3296 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
3297 if (!ioc->fault_reset_work_q) {
3298 printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
3299 ioc->name, __func__, __LINE__);
3300 goto out_free_resources;
3301 }
3302 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3303 if (ioc->fault_reset_work_q)
3304 queue_delayed_work(ioc->fault_reset_work_q,
3305 &ioc->fault_reset_work,
3306 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
3307 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3308 return 0; 3351 return 0;
3309 3352
3310 out_free_resources: 3353 out_free_resources:
@@ -3312,6 +3355,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3312 ioc->remove_host = 1; 3355 ioc->remove_host = 1;
3313 mpt2sas_base_free_resources(ioc); 3356 mpt2sas_base_free_resources(ioc);
3314 _base_release_memory_pools(ioc); 3357 _base_release_memory_pools(ioc);
3358 pci_set_drvdata(ioc->pdev, NULL);
3315 kfree(ioc->tm_cmds.reply); 3359 kfree(ioc->tm_cmds.reply);
3316 kfree(ioc->transport_cmds.reply); 3360 kfree(ioc->transport_cmds.reply);
3317 kfree(ioc->config_cmds.reply); 3361 kfree(ioc->config_cmds.reply);
@@ -3337,22 +3381,14 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3337void 3381void
3338mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) 3382mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3339{ 3383{
3340 unsigned long flags;
3341 struct workqueue_struct *wq;
3342 3384
3343 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name, 3385 dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3344 __func__)); 3386 __func__));
3345 3387
3346 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 3388 mpt2sas_base_stop_watchdog(ioc);
3347 wq = ioc->fault_reset_work_q;
3348 ioc->fault_reset_work_q = NULL;
3349 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3350 if (!cancel_delayed_work(&ioc->fault_reset_work))
3351 flush_workqueue(wq);
3352 destroy_workqueue(wq);
3353
3354 mpt2sas_base_free_resources(ioc); 3389 mpt2sas_base_free_resources(ioc);
3355 _base_release_memory_pools(ioc); 3390 _base_release_memory_pools(ioc);
3391 pci_set_drvdata(ioc->pdev, NULL);
3356 kfree(ioc->pfacts); 3392 kfree(ioc->pfacts);
3357 kfree(ioc->ctl_cmds.reply); 3393 kfree(ioc->ctl_cmds.reply);
3358 kfree(ioc->base_cmds.reply); 3394 kfree(ioc->base_cmds.reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 286c185fa9e4..acdcff150a35 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "01.100.03.00" 72#define MPT2SAS_DRIVER_VERSION "01.100.04.00"
73#define MPT2SAS_MAJOR_VERSION 01 73#define MPT2SAS_MAJOR_VERSION 01
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 03 75#define MPT2SAS_BUILD_VERSION 04
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
77 77
78/* 78/*
@@ -673,6 +673,8 @@ typedef void (*MPT_CALLBACK)(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID,
673 673
674/* base shared API */ 674/* base shared API */
675extern struct list_head mpt2sas_ioc_list; 675extern struct list_head mpt2sas_ioc_list;
676void mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc);
677void mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc);
676 678
677int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc); 679int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc);
678void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc); 680void mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 58cfb97846f7..6ddee161beb3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -236,17 +236,25 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
236 Mpi2ConfigRequest_t *config_request; 236 Mpi2ConfigRequest_t *config_request;
237 int r; 237 int r;
238 u8 retry_count; 238 u8 retry_count;
239 u8 issue_reset; 239 u8 issue_host_reset = 0;
240 u16 wait_state_count; 240 u16 wait_state_count;
241 241
242 mutex_lock(&ioc->config_cmds.mutex);
242 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) { 243 if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
243 printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n", 244 printk(MPT2SAS_ERR_FMT "%s: config_cmd in use\n",
244 ioc->name, __func__); 245 ioc->name, __func__);
246 mutex_unlock(&ioc->config_cmds.mutex);
245 return -EAGAIN; 247 return -EAGAIN;
246 } 248 }
247 retry_count = 0; 249 retry_count = 0;
248 250
249 retry_config: 251 retry_config:
252 if (retry_count) {
253 if (retry_count > 2) /* attempt only 2 retries */
254 return -EFAULT;
255 printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
256 ioc->name, __func__, retry_count);
257 }
250 wait_state_count = 0; 258 wait_state_count = 0;
251 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 259 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
252 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 260 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
@@ -254,8 +262,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
254 printk(MPT2SAS_ERR_FMT 262 printk(MPT2SAS_ERR_FMT
255 "%s: failed due to ioc not operational\n", 263 "%s: failed due to ioc not operational\n",
256 ioc->name, __func__); 264 ioc->name, __func__);
257 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 265 r = -EFAULT;
258 return -EFAULT; 266 goto out;
259 } 267 }
260 ssleep(1); 268 ssleep(1);
261 ioc_state = mpt2sas_base_get_iocstate(ioc, 1); 269 ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -271,8 +279,8 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
271 if (!smid) { 279 if (!smid) {
272 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n", 280 printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
273 ioc->name, __func__); 281 ioc->name, __func__);
274 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 282 r = -EAGAIN;
275 return -EAGAIN; 283 goto out;
276 } 284 }
277 285
278 r = 0; 286 r = 0;
@@ -292,9 +300,15 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
292 ioc->name, __func__); 300 ioc->name, __func__);
293 _debug_dump_mf(mpi_request, 301 _debug_dump_mf(mpi_request,
294 sizeof(Mpi2ConfigRequest_t)/4); 302 sizeof(Mpi2ConfigRequest_t)/4);
295 if (!(ioc->config_cmds.status & MPT2_CMD_RESET)) 303 retry_count++;
296 issue_reset = 1; 304 if (ioc->config_cmds.smid == smid)
297 goto issue_host_reset; 305 mpt2sas_base_free_smid(ioc, smid);
306 if ((ioc->shost_recovery) ||
307 (ioc->config_cmds.status & MPT2_CMD_RESET))
308 goto retry_config;
309 issue_host_reset = 1;
310 r = -EFAULT;
311 goto out;
298 } 312 }
299 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID) 313 if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
300 memcpy(mpi_reply, ioc->config_cmds.reply, 314 memcpy(mpi_reply, ioc->config_cmds.reply,
@@ -302,21 +316,13 @@ _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
302 if (retry_count) 316 if (retry_count)
303 printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n", 317 printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
304 ioc->name, __func__); 318 ioc->name, __func__);
319out:
305 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 320 ioc->config_cmds.status = MPT2_CMD_NOT_USED;
306 return r; 321 mutex_unlock(&ioc->config_cmds.mutex);
307 322 if (issue_host_reset)
308 issue_host_reset:
309 if (issue_reset)
310 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, 323 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
311 FORCE_BIG_HAMMER); 324 FORCE_BIG_HAMMER);
312 ioc->config_cmds.status = MPT2_CMD_NOT_USED; 325 return r;
313 if (!retry_count) {
314 printk(MPT2SAS_INFO_FMT "%s: attempting retry\n",
315 ioc->name, __func__);
316 retry_count++;
317 goto retry_config;
318 }
319 return -EFAULT;
320} 326}
321 327
322/** 328/**
@@ -375,7 +381,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
375 int r; 381 int r;
376 struct config_request mem; 382 struct config_request mem;
377 383
378 mutex_lock(&ioc->config_cmds.mutex);
379 memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t)); 384 memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
380 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 385 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
381 mpi_request.Function = MPI2_FUNCTION_CONFIG; 386 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -417,7 +422,6 @@ mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
417 _config_free_config_dma_memory(ioc, &mem); 422 _config_free_config_dma_memory(ioc, &mem);
418 423
419 out: 424 out:
420 mutex_unlock(&ioc->config_cmds.mutex);
421 return r; 425 return r;
422} 426}
423 427
@@ -438,7 +442,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
438 int r; 442 int r;
439 struct config_request mem; 443 struct config_request mem;
440 444
441 mutex_lock(&ioc->config_cmds.mutex);
442 memset(config_page, 0, sizeof(Mpi2BiosPage2_t)); 445 memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
443 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 446 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
444 mpi_request.Function = MPI2_FUNCTION_CONFIG; 447 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -480,7 +483,6 @@ mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc,
480 _config_free_config_dma_memory(ioc, &mem); 483 _config_free_config_dma_memory(ioc, &mem);
481 484
482 out: 485 out:
483 mutex_unlock(&ioc->config_cmds.mutex);
484 return r; 486 return r;
485} 487}
486 488
@@ -501,7 +503,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
501 int r; 503 int r;
502 struct config_request mem; 504 struct config_request mem;
503 505
504 mutex_lock(&ioc->config_cmds.mutex);
505 memset(config_page, 0, sizeof(Mpi2BiosPage3_t)); 506 memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
506 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 507 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
507 mpi_request.Function = MPI2_FUNCTION_CONFIG; 508 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -543,7 +544,6 @@ mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
543 _config_free_config_dma_memory(ioc, &mem); 544 _config_free_config_dma_memory(ioc, &mem);
544 545
545 out: 546 out:
546 mutex_unlock(&ioc->config_cmds.mutex);
547 return r; 547 return r;
548} 548}
549 549
@@ -564,7 +564,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
564 int r; 564 int r;
565 struct config_request mem; 565 struct config_request mem;
566 566
567 mutex_lock(&ioc->config_cmds.mutex);
568 memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t)); 567 memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
569 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 568 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
570 mpi_request.Function = MPI2_FUNCTION_CONFIG; 569 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -606,7 +605,6 @@ mpt2sas_config_get_iounit_pg0(struct MPT2SAS_ADAPTER *ioc,
606 _config_free_config_dma_memory(ioc, &mem); 605 _config_free_config_dma_memory(ioc, &mem);
607 606
608 out: 607 out:
609 mutex_unlock(&ioc->config_cmds.mutex);
610 return r; 608 return r;
611} 609}
612 610
@@ -627,7 +625,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
627 int r; 625 int r;
628 struct config_request mem; 626 struct config_request mem;
629 627
630 mutex_lock(&ioc->config_cmds.mutex);
631 memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t)); 628 memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
632 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 629 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
633 mpi_request.Function = MPI2_FUNCTION_CONFIG; 630 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -669,7 +666,6 @@ mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
669 _config_free_config_dma_memory(ioc, &mem); 666 _config_free_config_dma_memory(ioc, &mem);
670 667
671 out: 668 out:
672 mutex_unlock(&ioc->config_cmds.mutex);
673 return r; 669 return r;
674} 670}
675 671
@@ -690,7 +686,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
690 int r; 686 int r;
691 struct config_request mem; 687 struct config_request mem;
692 688
693 mutex_lock(&ioc->config_cmds.mutex);
694 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 689 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
695 mpi_request.Function = MPI2_FUNCTION_CONFIG; 690 mpi_request.Function = MPI2_FUNCTION_CONFIG;
696 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 691 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -732,7 +727,6 @@ mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
732 _config_free_config_dma_memory(ioc, &mem); 727 _config_free_config_dma_memory(ioc, &mem);
733 728
734 out: 729 out:
735 mutex_unlock(&ioc->config_cmds.mutex);
736 return r; 730 return r;
737} 731}
738 732
@@ -753,7 +747,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
753 int r; 747 int r;
754 struct config_request mem; 748 struct config_request mem;
755 749
756 mutex_lock(&ioc->config_cmds.mutex);
757 memset(config_page, 0, sizeof(Mpi2IOCPage8_t)); 750 memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
758 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 751 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
759 mpi_request.Function = MPI2_FUNCTION_CONFIG; 752 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -795,7 +788,6 @@ mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc,
795 _config_free_config_dma_memory(ioc, &mem); 788 _config_free_config_dma_memory(ioc, &mem);
796 789
797 out: 790 out:
798 mutex_unlock(&ioc->config_cmds.mutex);
799 return r; 791 return r;
800} 792}
801 793
@@ -818,7 +810,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
818 int r; 810 int r;
819 struct config_request mem; 811 struct config_request mem;
820 812
821 mutex_lock(&ioc->config_cmds.mutex);
822 memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t)); 813 memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
823 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 814 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
824 mpi_request.Function = MPI2_FUNCTION_CONFIG; 815 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -863,7 +854,6 @@ mpt2sas_config_get_sas_device_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
863 _config_free_config_dma_memory(ioc, &mem); 854 _config_free_config_dma_memory(ioc, &mem);
864 855
865 out: 856 out:
866 mutex_unlock(&ioc->config_cmds.mutex);
867 return r; 857 return r;
868} 858}
869 859
@@ -886,7 +876,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
886 int r; 876 int r;
887 struct config_request mem; 877 struct config_request mem;
888 878
889 mutex_lock(&ioc->config_cmds.mutex);
890 memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t)); 879 memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
891 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 880 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
892 mpi_request.Function = MPI2_FUNCTION_CONFIG; 881 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -931,7 +920,6 @@ mpt2sas_config_get_sas_device_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
931 _config_free_config_dma_memory(ioc, &mem); 920 _config_free_config_dma_memory(ioc, &mem);
932 921
933 out: 922 out:
934 mutex_unlock(&ioc->config_cmds.mutex);
935 return r; 923 return r;
936} 924}
937 925
@@ -953,7 +941,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
953 Mpi2ConfigReply_t mpi_reply; 941 Mpi2ConfigReply_t mpi_reply;
954 Mpi2SasIOUnitPage0_t config_page; 942 Mpi2SasIOUnitPage0_t config_page;
955 943
956 mutex_lock(&ioc->config_cmds.mutex);
957 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 944 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
958 mpi_request.Function = MPI2_FUNCTION_CONFIG; 945 mpi_request.Function = MPI2_FUNCTION_CONFIG;
959 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; 946 mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1002,7 +989,6 @@ mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys)
1002 _config_free_config_dma_memory(ioc, &mem); 989 _config_free_config_dma_memory(ioc, &mem);
1003 990
1004 out: 991 out:
1005 mutex_unlock(&ioc->config_cmds.mutex);
1006 return r; 992 return r;
1007} 993}
1008 994
@@ -1026,8 +1012,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1026 Mpi2ConfigRequest_t mpi_request; 1012 Mpi2ConfigRequest_t mpi_request;
1027 int r; 1013 int r;
1028 struct config_request mem; 1014 struct config_request mem;
1029
1030 mutex_lock(&ioc->config_cmds.mutex);
1031 memset(config_page, 0, sz); 1015 memset(config_page, 0, sz);
1032 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1016 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1033 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1017 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1070,7 +1054,6 @@ mpt2sas_config_get_sas_iounit_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1070 _config_free_config_dma_memory(ioc, &mem); 1054 _config_free_config_dma_memory(ioc, &mem);
1071 1055
1072 out: 1056 out:
1073 mutex_unlock(&ioc->config_cmds.mutex);
1074 return r; 1057 return r;
1075} 1058}
1076 1059
@@ -1095,7 +1078,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1095 int r; 1078 int r;
1096 struct config_request mem; 1079 struct config_request mem;
1097 1080
1098 mutex_lock(&ioc->config_cmds.mutex);
1099 memset(config_page, 0, sz); 1081 memset(config_page, 0, sz);
1100 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1082 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1101 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1083 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1138,7 +1120,6 @@ mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1138 _config_free_config_dma_memory(ioc, &mem); 1120 _config_free_config_dma_memory(ioc, &mem);
1139 1121
1140 out: 1122 out:
1141 mutex_unlock(&ioc->config_cmds.mutex);
1142 return r; 1123 return r;
1143} 1124}
1144 1125
@@ -1161,7 +1142,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1161 int r; 1142 int r;
1162 struct config_request mem; 1143 struct config_request mem;
1163 1144
1164 mutex_lock(&ioc->config_cmds.mutex);
1165 memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t)); 1145 memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
1166 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1146 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1167 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1147 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1206,7 +1186,6 @@ mpt2sas_config_get_expander_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1206 _config_free_config_dma_memory(ioc, &mem); 1186 _config_free_config_dma_memory(ioc, &mem);
1207 1187
1208 out: 1188 out:
1209 mutex_unlock(&ioc->config_cmds.mutex);
1210 return r; 1189 return r;
1211} 1190}
1212 1191
@@ -1230,7 +1209,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1230 int r; 1209 int r;
1231 struct config_request mem; 1210 struct config_request mem;
1232 1211
1233 mutex_lock(&ioc->config_cmds.mutex);
1234 memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t)); 1212 memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
1235 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1213 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1236 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1214 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1277,7 +1255,6 @@ mpt2sas_config_get_expander_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1277 _config_free_config_dma_memory(ioc, &mem); 1255 _config_free_config_dma_memory(ioc, &mem);
1278 1256
1279 out: 1257 out:
1280 mutex_unlock(&ioc->config_cmds.mutex);
1281 return r; 1258 return r;
1282} 1259}
1283 1260
@@ -1300,7 +1277,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1300 int r; 1277 int r;
1301 struct config_request mem; 1278 struct config_request mem;
1302 1279
1303 mutex_lock(&ioc->config_cmds.mutex);
1304 memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t)); 1280 memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
1305 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1281 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1306 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1282 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1345,7 +1321,6 @@ mpt2sas_config_get_enclosure_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1345 _config_free_config_dma_memory(ioc, &mem); 1321 _config_free_config_dma_memory(ioc, &mem);
1346 1322
1347 out: 1323 out:
1348 mutex_unlock(&ioc->config_cmds.mutex);
1349 return r; 1324 return r;
1350} 1325}
1351 1326
@@ -1367,7 +1342,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1367 int r; 1342 int r;
1368 struct config_request mem; 1343 struct config_request mem;
1369 1344
1370 mutex_lock(&ioc->config_cmds.mutex);
1371 memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t)); 1345 memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
1372 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1346 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1373 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1347 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1413,7 +1387,6 @@ mpt2sas_config_get_phy_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1413 _config_free_config_dma_memory(ioc, &mem); 1387 _config_free_config_dma_memory(ioc, &mem);
1414 1388
1415 out: 1389 out:
1416 mutex_unlock(&ioc->config_cmds.mutex);
1417 return r; 1390 return r;
1418} 1391}
1419 1392
@@ -1435,7 +1408,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1435 int r; 1408 int r;
1436 struct config_request mem; 1409 struct config_request mem;
1437 1410
1438 mutex_lock(&ioc->config_cmds.mutex);
1439 memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t)); 1411 memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
1440 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1412 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1441 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1413 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1481,7 +1453,6 @@ mpt2sas_config_get_phy_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1481 _config_free_config_dma_memory(ioc, &mem); 1453 _config_free_config_dma_memory(ioc, &mem);
1482 1454
1483 out: 1455 out:
1484 mutex_unlock(&ioc->config_cmds.mutex);
1485 return r; 1456 return r;
1486} 1457}
1487 1458
@@ -1505,7 +1476,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1505 int r; 1476 int r;
1506 struct config_request mem; 1477 struct config_request mem;
1507 1478
1508 mutex_lock(&ioc->config_cmds.mutex);
1509 memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t)); 1479 memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
1510 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1480 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1511 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1481 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1548,7 +1518,6 @@ mpt2sas_config_get_raid_volume_pg1(struct MPT2SAS_ADAPTER *ioc,
1548 _config_free_config_dma_memory(ioc, &mem); 1518 _config_free_config_dma_memory(ioc, &mem);
1549 1519
1550 out: 1520 out:
1551 mutex_unlock(&ioc->config_cmds.mutex);
1552 return r; 1521 return r;
1553} 1522}
1554 1523
@@ -1572,7 +1541,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1572 struct config_request mem; 1541 struct config_request mem;
1573 u16 ioc_status; 1542 u16 ioc_status;
1574 1543
1575 mutex_lock(&ioc->config_cmds.mutex);
1576 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1544 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1577 *num_pds = 0; 1545 *num_pds = 0;
1578 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1546 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1620,7 +1588,6 @@ mpt2sas_config_get_number_pds(struct MPT2SAS_ADAPTER *ioc, u16 handle,
1620 _config_free_config_dma_memory(ioc, &mem); 1588 _config_free_config_dma_memory(ioc, &mem);
1621 1589
1622 out: 1590 out:
1623 mutex_unlock(&ioc->config_cmds.mutex);
1624 return r; 1591 return r;
1625} 1592}
1626 1593
@@ -1645,7 +1612,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1645 int r; 1612 int r;
1646 struct config_request mem; 1613 struct config_request mem;
1647 1614
1648 mutex_lock(&ioc->config_cmds.mutex);
1649 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1615 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1650 memset(config_page, 0, sz); 1616 memset(config_page, 0, sz);
1651 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1617 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1687,7 +1653,6 @@ mpt2sas_config_get_raid_volume_pg0(struct MPT2SAS_ADAPTER *ioc,
1687 _config_free_config_dma_memory(ioc, &mem); 1653 _config_free_config_dma_memory(ioc, &mem);
1688 1654
1689 out: 1655 out:
1690 mutex_unlock(&ioc->config_cmds.mutex);
1691 return r; 1656 return r;
1692} 1657}
1693 1658
@@ -1711,7 +1676,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1711 int r; 1676 int r;
1712 struct config_request mem; 1677 struct config_request mem;
1713 1678
1714 mutex_lock(&ioc->config_cmds.mutex);
1715 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1679 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1716 memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t)); 1680 memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
1717 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1681 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1754,7 +1718,6 @@ mpt2sas_config_get_phys_disk_pg0(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
1754 _config_free_config_dma_memory(ioc, &mem); 1718 _config_free_config_dma_memory(ioc, &mem);
1755 1719
1756 out: 1720 out:
1757 mutex_unlock(&ioc->config_cmds.mutex);
1758 return r; 1721 return r;
1759} 1722}
1760 1723
@@ -1778,7 +1741,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1778 struct config_request mem; 1741 struct config_request mem;
1779 u16 ioc_status; 1742 u16 ioc_status;
1780 1743
1781 mutex_lock(&ioc->config_cmds.mutex);
1782 *volume_handle = 0; 1744 *volume_handle = 0;
1783 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); 1745 memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
1784 mpi_request.Function = MPI2_FUNCTION_CONFIG; 1746 mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -1842,7 +1804,6 @@ mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
1842 _config_free_config_dma_memory(ioc, &mem); 1804 _config_free_config_dma_memory(ioc, &mem);
1843 1805
1844 out: 1806 out:
1845 mutex_unlock(&ioc->config_cmds.mutex);
1846 return r; 1807 return r;
1847} 1808}
1848 1809
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 2a01a5f2a84d..2e9a4445596f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2767,6 +2767,10 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
2767 char *desc_ioc_state = NULL; 2767 char *desc_ioc_state = NULL;
2768 char *desc_scsi_status = NULL; 2768 char *desc_scsi_status = NULL;
2769 char *desc_scsi_state = ioc->tmp_string; 2769 char *desc_scsi_state = ioc->tmp_string;
2770 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
2771
2772 if (log_info == 0x31170000)
2773 return;
2770 2774
2771 switch (ioc_status) { 2775 switch (ioc_status) {
2772 case MPI2_IOCSTATUS_SUCCESS: 2776 case MPI2_IOCSTATUS_SUCCESS:
@@ -3426,7 +3430,7 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3426 __le64 sas_address; 3430 __le64 sas_address;
3427 int i; 3431 int i;
3428 unsigned long flags; 3432 unsigned long flags;
3429 struct _sas_port *mpt2sas_port; 3433 struct _sas_port *mpt2sas_port = NULL;
3430 int rc = 0; 3434 int rc = 0;
3431 3435
3432 if (!handle) 3436 if (!handle)
@@ -3518,12 +3522,20 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3518 &expander_pg1, i, handle))) { 3522 &expander_pg1, i, handle))) {
3519 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", 3523 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3520 ioc->name, __FILE__, __LINE__, __func__); 3524 ioc->name, __FILE__, __LINE__, __func__);
3521 continue; 3525 rc = -1;
3526 goto out_fail;
3522 } 3527 }
3523 sas_expander->phy[i].handle = handle; 3528 sas_expander->phy[i].handle = handle;
3524 sas_expander->phy[i].phy_id = i; 3529 sas_expander->phy[i].phy_id = i;
3525 mpt2sas_transport_add_expander_phy(ioc, &sas_expander->phy[i], 3530
3526 expander_pg1, sas_expander->parent_dev); 3531 if ((mpt2sas_transport_add_expander_phy(ioc,
3532 &sas_expander->phy[i], expander_pg1,
3533 sas_expander->parent_dev))) {
3534 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
3535 ioc->name, __FILE__, __LINE__, __func__);
3536 rc = -1;
3537 goto out_fail;
3538 }
3527 } 3539 }
3528 3540
3529 if (sas_expander->enclosure_handle) { 3541 if (sas_expander->enclosure_handle) {
@@ -3540,8 +3552,9 @@ _scsih_expander_add(struct MPT2SAS_ADAPTER *ioc, u16 handle)
3540 3552
3541 out_fail: 3553 out_fail:
3542 3554
3543 if (sas_expander) 3555 if (mpt2sas_port)
3544 kfree(sas_expander->phy); 3556 mpt2sas_transport_port_remove(ioc, sas_expander->sas_address,
3557 sas_expander->parent_handle);
3545 kfree(sas_expander); 3558 kfree(sas_expander);
3546 return rc; 3559 return rc;
3547} 3560}
@@ -3663,12 +3676,11 @@ _scsih_add_device(struct MPT2SAS_ADAPTER *ioc, u16 handle, u8 phy_num, u8 is_pd)
3663 sas_device->hidden_raid_component = is_pd; 3676 sas_device->hidden_raid_component = is_pd;
3664 3677
3665 /* get enclosure_logical_id */ 3678 /* get enclosure_logical_id */
3666 if (!(mpt2sas_config_get_enclosure_pg0(ioc, &mpi_reply, &enclosure_pg0, 3679 if (sas_device->enclosure_handle && !(mpt2sas_config_get_enclosure_pg0(
3667 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, 3680 ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
3668 sas_device->enclosure_handle))) { 3681 sas_device->enclosure_handle)))
3669 sas_device->enclosure_logical_id = 3682 sas_device->enclosure_logical_id =
3670 le64_to_cpu(enclosure_pg0.EnclosureLogicalID); 3683 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
3671 }
3672 3684
3673 /* get device name */ 3685 /* get device name */
3674 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); 3686 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
@@ -4250,12 +4262,6 @@ _scsih_sas_volume_add(struct MPT2SAS_ADAPTER *ioc,
4250 u16 handle = le16_to_cpu(element->VolDevHandle); 4262 u16 handle = le16_to_cpu(element->VolDevHandle);
4251 int rc; 4263 int rc;
4252 4264
4253#if 0 /* RAID_HACKS */
4254 if (le32_to_cpu(event_data->Flags) &
4255 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4256 return;
4257#endif
4258
4259 mpt2sas_config_get_volume_wwid(ioc, handle, &wwid); 4265 mpt2sas_config_get_volume_wwid(ioc, handle, &wwid);
4260 if (!wwid) { 4266 if (!wwid) {
4261 printk(MPT2SAS_ERR_FMT 4267 printk(MPT2SAS_ERR_FMT
@@ -4310,12 +4316,6 @@ _scsih_sas_volume_delete(struct MPT2SAS_ADAPTER *ioc,
4310 unsigned long flags; 4316 unsigned long flags;
4311 struct MPT2SAS_TARGET *sas_target_priv_data; 4317 struct MPT2SAS_TARGET *sas_target_priv_data;
4312 4318
4313#if 0 /* RAID_HACKS */
4314 if (le32_to_cpu(event_data->Flags) &
4315 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4316 return;
4317#endif
4318
4319 spin_lock_irqsave(&ioc->raid_device_lock, flags); 4319 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4320 raid_device = _scsih_raid_device_find_by_handle(ioc, handle); 4320 raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
4321 spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 4321 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
@@ -4428,14 +4428,38 @@ _scsih_sas_pd_add(struct MPT2SAS_ADAPTER *ioc,
4428 struct _sas_device *sas_device; 4428 struct _sas_device *sas_device;
4429 unsigned long flags; 4429 unsigned long flags;
4430 u16 handle = le16_to_cpu(element->PhysDiskDevHandle); 4430 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
4431 Mpi2ConfigReply_t mpi_reply;
4432 Mpi2SasDevicePage0_t sas_device_pg0;
4433 u32 ioc_status;
4431 4434
4432 spin_lock_irqsave(&ioc->sas_device_lock, flags); 4435 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4433 sas_device = _scsih_sas_device_find_by_handle(ioc, handle); 4436 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4434 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4437 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4435 if (sas_device) 4438 if (sas_device) {
4436 sas_device->hidden_raid_component = 1; 4439 sas_device->hidden_raid_component = 1;
4437 else 4440 return;
4438 _scsih_add_device(ioc, handle, 0, 1); 4441 }
4442
4443 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
4444 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
4445 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4446 ioc->name, __FILE__, __LINE__, __func__);
4447 return;
4448 }
4449
4450 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4451 MPI2_IOCSTATUS_MASK;
4452 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4453 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4454 ioc->name, __FILE__, __LINE__, __func__);
4455 return;
4456 }
4457
4458 _scsih_link_change(ioc,
4459 le16_to_cpu(sas_device_pg0.ParentDevHandle),
4460 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4461
4462 _scsih_add_device(ioc, handle, 0, 1);
4439} 4463}
4440 4464
4441#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4465#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -4535,12 +4559,15 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4535{ 4559{
4536 Mpi2EventIrConfigElement_t *element; 4560 Mpi2EventIrConfigElement_t *element;
4537 int i; 4561 int i;
4562 u8 foreign_config;
4538 4563
4539#ifdef CONFIG_SCSI_MPT2SAS_LOGGING 4564#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
4540 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) 4565 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
4541 _scsih_sas_ir_config_change_event_debug(ioc, event_data); 4566 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
4542 4567
4543#endif 4568#endif
4569 foreign_config = (le32_to_cpu(event_data->Flags) &
4570 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
4544 4571
4545 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; 4572 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4546 for (i = 0; i < event_data->NumElements; i++, element++) { 4573 for (i = 0; i < event_data->NumElements; i++, element++) {
@@ -4548,11 +4575,13 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4548 switch (element->ReasonCode) { 4575 switch (element->ReasonCode) {
4549 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: 4576 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
4550 case MPI2_EVENT_IR_CHANGE_RC_ADDED: 4577 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
4551 _scsih_sas_volume_add(ioc, element); 4578 if (!foreign_config)
4579 _scsih_sas_volume_add(ioc, element);
4552 break; 4580 break;
4553 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: 4581 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
4554 case MPI2_EVENT_IR_CHANGE_RC_REMOVED: 4582 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
4555 _scsih_sas_volume_delete(ioc, element); 4583 if (!foreign_config)
4584 _scsih_sas_volume_delete(ioc, element);
4556 break; 4585 break;
4557 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: 4586 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
4558 _scsih_sas_pd_hide(ioc, element); 4587 _scsih_sas_pd_hide(ioc, element);
@@ -4671,6 +4700,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4671 u32 state; 4700 u32 state;
4672 struct _sas_device *sas_device; 4701 struct _sas_device *sas_device;
4673 unsigned long flags; 4702 unsigned long flags;
4703 Mpi2ConfigReply_t mpi_reply;
4704 Mpi2SasDevicePage0_t sas_device_pg0;
4705 u32 ioc_status;
4674 4706
4675 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) 4707 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
4676 return; 4708 return;
@@ -4687,22 +4719,40 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc, u8 VF_ID,
4687 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4719 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4688 4720
4689 switch (state) { 4721 switch (state) {
4690#if 0
4691 case MPI2_RAID_PD_STATE_OFFLINE:
4692 if (sas_device)
4693 _scsih_remove_device(ioc, handle);
4694 break;
4695#endif
4696 case MPI2_RAID_PD_STATE_ONLINE: 4722 case MPI2_RAID_PD_STATE_ONLINE:
4697 case MPI2_RAID_PD_STATE_DEGRADED: 4723 case MPI2_RAID_PD_STATE_DEGRADED:
4698 case MPI2_RAID_PD_STATE_REBUILDING: 4724 case MPI2_RAID_PD_STATE_REBUILDING:
4699 case MPI2_RAID_PD_STATE_OPTIMAL: 4725 case MPI2_RAID_PD_STATE_OPTIMAL:
4700 if (sas_device) 4726 if (sas_device) {
4701 sas_device->hidden_raid_component = 1; 4727 sas_device->hidden_raid_component = 1;
4702 else 4728 return;
4703 _scsih_add_device(ioc, handle, 0, 1); 4729 }
4730
4731 if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply,
4732 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
4733 handle))) {
4734 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4735 ioc->name, __FILE__, __LINE__, __func__);
4736 return;
4737 }
4738
4739 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4740 MPI2_IOCSTATUS_MASK;
4741 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4742 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
4743 ioc->name, __FILE__, __LINE__, __func__);
4744 return;
4745 }
4746
4747 _scsih_link_change(ioc,
4748 le16_to_cpu(sas_device_pg0.ParentDevHandle),
4749 handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
4750
4751 _scsih_add_device(ioc, handle, 0, 1);
4752
4704 break; 4753 break;
4705 4754
4755 case MPI2_RAID_PD_STATE_OFFLINE:
4706 case MPI2_RAID_PD_STATE_NOT_CONFIGURED: 4756 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
4707 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: 4757 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
4708 case MPI2_RAID_PD_STATE_HOT_SPARE: 4758 case MPI2_RAID_PD_STATE_HOT_SPARE:
@@ -5774,6 +5824,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
5774 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 5824 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
5775 u32 device_state; 5825 u32 device_state;
5776 5826
5827 mpt2sas_base_stop_watchdog(ioc);
5777 flush_scheduled_work(); 5828 flush_scheduled_work();
5778 scsi_block_requests(shost); 5829 scsi_block_requests(shost);
5779 device_state = pci_choose_state(pdev, state); 5830 device_state = pci_choose_state(pdev, state);
@@ -5816,6 +5867,7 @@ _scsih_resume(struct pci_dev *pdev)
5816 5867
5817 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); 5868 mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
5818 scsi_unblock_requests(shost); 5869 scsi_unblock_requests(shost);
5870 mpt2sas_base_start_watchdog(ioc);
5819 return 0; 5871 return 0;
5820} 5872}
5821#endif /* CONFIG_PM */ 5873#endif /* CONFIG_PM */
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 037c1e0b7c4c..6553833c12db 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -527,7 +527,7 @@ config SERIAL_S3C24A0
527 527
528config SERIAL_S3C6400 528config SERIAL_S3C6400
529 tristate "Samsung S3C6400/S3C6410 Serial port support" 529 tristate "Samsung S3C6400/S3C6410 Serial port support"
530 depends on SERIAL_SAMSUNG && (CPU_S3C600 || CPU_S3C6410) 530 depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410)
531 default y 531 default y
532 help 532 help
533 Serial port support for the Samsung S3C6400 and S3C6410 533 Serial port support for the Samsung S3C6400 and S3C6410
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index e0d44af4745a..3f3119d760db 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -111,29 +111,32 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
111 unsigned int bpw; 111 unsigned int bpw;
112 unsigned int hz; 112 unsigned int hz;
113 unsigned int div; 113 unsigned int div;
114 unsigned long clk;
114 115
115 bpw = t ? t->bits_per_word : spi->bits_per_word; 116 bpw = t ? t->bits_per_word : spi->bits_per_word;
116 hz = t ? t->speed_hz : spi->max_speed_hz; 117 hz = t ? t->speed_hz : spi->max_speed_hz;
117 118
119 if (!bpw)
120 bpw = 8;
121
122 if (!hz)
123 hz = spi->max_speed_hz;
124
118 if (bpw != 8) { 125 if (bpw != 8) {
119 dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); 126 dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
120 return -EINVAL; 127 return -EINVAL;
121 } 128 }
122 129
123 div = clk_get_rate(hw->clk) / hz; 130 clk = clk_get_rate(hw->clk);
124 131 div = DIV_ROUND_UP(clk, hz * 2) - 1;
125 /* is clk = pclk / (2 * (pre+1)), or is it
126 * clk = (pclk * 2) / ( pre + 1) */
127
128 div /= 2;
129
130 if (div > 0)
131 div -= 1;
132 132
133 if (div > 255) 133 if (div > 255)
134 div = 255; 134 div = 255;
135 135
136 dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); 136 dev_dbg(&spi->dev, "setting pre-scaler to %d (wanted %d, got %ld)\n",
137 div, hz, clk / (2 * (div + 1)));
138
139
137 writeb(div, hw->regs + S3C2410_SPPRE); 140 writeb(div, hw->regs + S3C2410_SPPRE);
138 141
139 spin_lock(&hw->bitbang.lock); 142 spin_lock(&hw->bitbang.lock);
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 0a69672097a8..4e83c297ec9e 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -953,7 +953,12 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
953 953
954 mutex_lock(&tz->lock); 954 mutex_lock(&tz->lock);
955 955
956 tz->ops->get_temp(tz, &temp); 956 if (tz->ops->get_temp(tz, &temp)) {
957 /* get_temp failed - retry it later */
958 printk(KERN_WARNING PREFIX "failed to read out thermal zone "
959 "%d\n", tz->id);
960 goto leave;
961 }
957 962
958 for (count = 0; count < tz->trips; count++) { 963 for (count = 0; count < tz->trips; count++) {
959 tz->ops->get_trip_type(tz, count, &trip_type); 964 tz->ops->get_trip_type(tz, count, &trip_type);
@@ -1005,6 +1010,8 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
1005 THERMAL_TRIPS_NONE); 1010 THERMAL_TRIPS_NONE);
1006 1011
1007 tz->last_temperature = temp; 1012 tz->last_temperature = temp;
1013
1014 leave:
1008 if (tz->passive) 1015 if (tz->passive)
1009 thermal_zone_device_set_polling(tz, tz->passive_delay); 1016 thermal_zone_device_set_polling(tz, tz->passive_delay);
1010 else if (tz->polling_delay) 1017 else if (tz->polling_delay)
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 8f24564f77b0..07f22b625632 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -481,6 +481,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
481 /* tell the board code to enable the panel */ 481 /* tell the board code to enable the panel */
482 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 482 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
483 ch = &priv->ch[k]; 483 ch = &priv->ch[k];
484 if (!ch->enabled)
485 continue;
486
484 board_cfg = &ch->cfg.board_cfg; 487 board_cfg = &ch->cfg.board_cfg;
485 if (board_cfg->display_on) 488 if (board_cfg->display_on)
486 board_cfg->display_on(board_cfg->board_data); 489 board_cfg->display_on(board_cfg->board_data);
@@ -498,6 +501,8 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
498 /* clean up deferred io and ask board code to disable panel */ 501 /* clean up deferred io and ask board code to disable panel */
499 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 502 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
500 ch = &priv->ch[k]; 503 ch = &priv->ch[k];
504 if (!ch->enabled)
505 continue;
501 506
502 /* deferred io mode: 507 /* deferred io mode:
503 * flush frame, and wait for frame end interrupt 508 * flush frame, and wait for frame end interrupt
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 15502d5e3641..54cd91610174 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -454,6 +454,10 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
454 454
455 xenfb_init_shared_page(info, fb_info); 455 xenfb_init_shared_page(info, fb_info);
456 456
457 ret = xenfb_connect_backend(dev, info);
458 if (ret < 0)
459 goto error;
460
457 ret = register_framebuffer(fb_info); 461 ret = register_framebuffer(fb_info);
458 if (ret) { 462 if (ret) {
459 fb_deferred_io_cleanup(fb_info); 463 fb_deferred_io_cleanup(fb_info);
@@ -464,10 +468,6 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
464 } 468 }
465 info->fb_info = fb_info; 469 info->fb_info = fb_info;
466 470
467 ret = xenfb_connect_backend(dev, info);
468 if (ret < 0)
469 goto error;
470
471 xenfb_make_preferred_console(); 471 xenfb_make_preferred_console();
472 return 0; 472 return 0;
473 473
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index 3fe9742c23ca..2f8643efe92c 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -37,7 +37,7 @@
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38 38
39#include <asm/addrspace.h> 39#include <asm/addrspace.h>
40#include <asm/ar7/ar7.h> 40#include <asm/mach-ar7/ar7.h>
41 41
42#define DRVNAME "ar7_wdt" 42#define DRVNAME "ar7_wdt"
43#define LONGNAME "TI AR7 Watchdog Timer" 43#define LONGNAME "TI AR7 Watchdog Timer"