Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 33
-rw-r--r--  drivers/acpi/ec.c | 4
-rw-r--r--  drivers/acpi/processor_driver.c | 8
-rw-r--r--  drivers/acpi/processor_idle.c | 29
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/video.c | 8
-rw-r--r--  drivers/base/power/common.c | 12
-rw-r--r--  drivers/block/rbd.c | 935
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 14
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 16
-rw-r--r--  drivers/cpufreq/Kconfig | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 15
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 7
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 9
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 27
-rw-r--r--  drivers/cpufreq/cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 11
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 1
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 1
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 7
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 122
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 27
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 20
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 6
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 34
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 77
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 44
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 90
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 29
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 12
-rw-r--r--  drivers/lguest/page_tables.c | 1
-rw-r--r--  drivers/mmc/host/mmci.c | 9
-rw-r--r--  drivers/net/caif/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 25
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 5
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/calxeda/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 20
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 95
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 46
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 2
-rw-r--r--  drivers/net/macvlan.c | 7
-rw-r--r--  drivers/net/ntb_netdev.c | 2
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 138
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 10
-rw-r--r--  drivers/net/wireless/b43/dma.c | 19
-rw-r--r--  drivers/net/wireless/b43/dma.h | 4
-rw-r--r--  drivers/net/wireless/b43/main.c | 43
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/cmdevt.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 1
-rw-r--r--  drivers/net/wireless/mwifiex/sta_ioctl.c | 21
-rw-r--r--  drivers/ntb/ntb_hw.c | 10
-rw-r--r--  drivers/ntb/ntb_transport.c | 175
-rw-r--r--  drivers/rtc/Kconfig | 2
-rw-r--r--  drivers/spi/spi-atmel.c | 51
-rw-r--r--  drivers/spi/spi-davinci.c | 2
-rw-r--r--  drivers/spi/spi.c | 9
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 63
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.c | 8
-rw-r--r--  drivers/target/iscsi/iscsi_target_parameters.h | 4
-rw-r--r--  drivers/target/target_core_configfs.c | 11
-rw-r--r--  drivers/target/target_core_device.c | 14
-rw-r--r--  drivers/target/target_core_file.c | 9
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  drivers/target/target_core_internal.h | 1
-rw-r--r--  drivers/target/target_core_rd.c | 21
-rw-r--r--  drivers/target/target_core_rd.h | 1
-rw-r--r--  drivers/target/target_core_transport.c | 13
-rw-r--r--  drivers/vhost/vringh.c | 3
103 files changed, 1610 insertions(+), 1149 deletions(-)
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 00d2efd674df..4f4e741d34b2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
 #ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+static int ac_sleep_before_get_state_ms;
+
 static struct acpi_driver acpi_ac_driver = {
        .name = "ac",
        .class = ACPI_AC_CLASS,
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        case ACPI_AC_NOTIFY_STATUS:
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
+               /*
+                * A buggy BIOS may notify AC first and then sleep for
+                * a specific time before doing actual operations in the
+                * EC event handler (_Qxx). This will cause the AC state
+                * reported by the ACPI event to be incorrect, so wait for a
+                * specific time for the EC event handler to make progress.
+                */
+               if (ac_sleep_before_get_state_ms > 0)
+                       msleep(ac_sleep_before_get_state_ms);
+
                acpi_ac_get_state(ac);
                acpi_bus_generate_proc_event(device, event, (u32) ac->state);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
        return;
 }
 
+static int thinkpad_e530_quirk(const struct dmi_system_id *d)
+{
+       ac_sleep_before_get_state_ms = 1000;
+       return 0;
+}
+
+static struct dmi_system_id ac_dmi_table[] = {
+       {
+       .callback = thinkpad_e530_quirk,
+       .ident = "thinkpad e530",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+               },
+       },
+       {},
+};
+
 static int acpi_ac_add(struct acpi_device *device)
 {
        int result = 0;
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device)
                kfree(ac);
        }
 
+       dmi_check_system(ac_dmi_table);
        return result;
 }
 
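The quirk above is table-driven: dmi_check_system() walks ac_dmi_table and fires the callback of the first entry whose DMI strings all match, which then arms the msleep() in the notify path. A minimal user-space sketch of that match-table shape, with invented names (illustrative only, not part of the patch):

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *vendor;             /* stand-in for DMI_SYS_VENDOR */
        const char *product;            /* stand-in for DMI_PRODUCT_NAME */
        int (*callback)(void);
};

static int sleep_before_get_state_ms;   /* mirrors ac_sleep_before_get_state_ms */

static int thinkpad_quirk(void)
{
        sleep_before_get_state_ms = 1000;
        return 0;
}

static const struct quirk quirk_table[] = {
        { "LENOVO", "32597CG", thinkpad_quirk },
        { NULL, NULL, NULL },           /* terminator, like the trailing {} */
};

static void check_system(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirk_table; q->vendor; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product)) {
                        q->callback();
                        return;
                }
}

int main(void)
{
        check_system("LENOVO", "32597CG");
        printf("delay = %d ms\n", sleep_before_get_state_ms);   /* 1000 */
        return 0;
}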
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d45b2871d33b..edc00818c803 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 static int ec_poll(struct acpi_ec *ec)
 {
        unsigned long flags;
-       int repeat = 2; /* number of command restarts */
+       int repeat = 5; /* number of command restarts */
        while (repeat--) {
                unsigned long delay = jiffies +
                        msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
                }
                advance_transaction(ec, acpi_ec_read_status(ec));
        } while (time_before(jiffies, delay));
-       if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
-               break;
        pr_debug(PREFIX "controller reset, restart transaction\n");
        spin_lock_irqsave(&ec->lock, flags);
        start_transaction(ec);
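The two ec.c changes work together: the early-break heuristic on the IBF flag is dropped, and the number of whole-transaction restarts is raised from 2 to 5, so a slow EC simply gets more full timeout windows. A user-space sketch of that poll-until-deadline-then-restart shape, under the assumption that poll_status() stands in for reading the EC status register:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool poll_status(void)
{
        return false;                   /* pretend the device never responds */
}

static int poll_with_restarts(int repeat, long timeout_ms)
{
        while (repeat--) {
                struct timespec now, deadline;

                clock_gettime(CLOCK_MONOTONIC, &deadline);
                deadline.tv_sec += timeout_ms / 1000;
                deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
                if (deadline.tv_nsec >= 1000000000L) {
                        deadline.tv_sec++;
                        deadline.tv_nsec -= 1000000000L;
                }
                do {                    /* like the jiffies/time_before() loop */
                        if (poll_status())
                                return 0;
                        clock_gettime(CLOCK_MONOTONIC, &now);
                } while (now.tv_sec < deadline.tv_sec ||
                         (now.tv_sec == deadline.tv_sec &&
                          now.tv_nsec < deadline.tv_nsec));
                fprintf(stderr, "restarting transaction\n");
        }
        return -1;                      /* all restarts exhausted */
}

int main(void)
{
        return poll_with_restarts(5, 10) ? 1 : 0;
}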
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bec717ffd25f..c266cdc11784 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
 
-static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
-                        acpi_processor_suspend, acpi_processor_resume);
-
 static struct acpi_driver acpi_processor_driver = {
        .name = "processor",
        .class = ACPI_PROCESSOR_CLASS,
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
        .remove = acpi_processor_remove,
        .notify = acpi_processor_notify,
        },
-       .drv.pm = &acpi_processor_pm,
 };
 
 #define INSTALL_NOTIFY_HANDLER 1
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void)
        if (result < 0)
                return result;
 
+       acpi_processor_syscore_init();
+
        acpi_processor_install_hotplug_notify();
 
        acpi_thermal_cpufreq_init();
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void)
 
        acpi_processor_uninstall_hotplug_notify();
 
+       acpi_processor_syscore_exit();
+
        acpi_bus_unregister_driver(&acpi_processor_driver);
 
        return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f0df2c9434d2..eb133c77aadb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -34,6 +34,7 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
+#include <linux/syscore_ops.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
-static void acpi_idle_bm_rld_save(void)
+int acpi_processor_suspend(void)
 {
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+       return 0;
 }
-static void acpi_idle_bm_rld_restore(void)
+
+void acpi_processor_resume(void)
 {
        u32 resumed_bm_rld;
 
        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+       if (resumed_bm_rld == saved_bm_rld)
+               return;
 
-       if (resumed_bm_rld != saved_bm_rld)
-               acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+       acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 }
 
-int acpi_processor_suspend(struct device *dev)
+static struct syscore_ops acpi_processor_syscore_ops = {
+       .suspend = acpi_processor_suspend,
+       .resume = acpi_processor_resume,
+};
+
+void acpi_processor_syscore_init(void)
 {
-       acpi_idle_bm_rld_save();
-       return 0;
+       register_syscore_ops(&acpi_processor_syscore_ops);
 }
 
-int acpi_processor_resume(struct device *dev)
+void acpi_processor_syscore_exit(void)
 {
-       acpi_idle_bm_rld_restore();
-       return 0;
+       unregister_syscore_ops(&acpi_processor_syscore_ops);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
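The move from dev_pm_ops to syscore_ops changes when the BM_RLD save/restore runs: syscore callbacks are invoked once per transition, late in suspend and early in resume, rather than through the per-device PM path. A rough user-space analogue of the ops-struct registration pattern (invented names, illustrative only):

#include <stdio.h>

struct core_ops {
        int (*suspend)(void);
        void (*resume)(void);
        struct core_ops *next;
};

static struct core_ops *ops_list;       /* what register_syscore_ops() appends to */

static void register_core_ops(struct core_ops *ops)
{
        ops->next = ops_list;
        ops_list = ops;
}

static unsigned int saved_state;        /* mirrors the saved_bm_rld static */

static int demo_suspend(void)
{
        saved_state = 1;                /* stand-in for reading the ACPI register */
        return 0;
}

static void demo_resume(void)
{
        printf("restoring state %u\n", saved_state);
}

static struct core_ops demo_ops = {
        .suspend = demo_suspend,
        .resume = demo_resume,
};

int main(void)
{
        struct core_ops *ops;

        register_core_ops(&demo_ops);
        for (ops = ops_list; ops; ops = ops->next)      /* suspend path */
                ops->suspend();
        for (ops = ops_list; ops; ops = ops->next)      /* resume path */
                ops->resume();
        return 0;
}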
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fe158fd4f1df..c1bc608339a6 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
        acpi_set_pnp_ids(handle, &pnp, type);
 
        if (!pnp.type.hardware_id)
-               return;
+               goto out;
 
        /*
         * This relies on the fact that acpi_install_notify_handler() will not
@@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
                }
        }
 
+out:
        acpi_free_pnp_ids(&pnp);
 }
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index c3932d0876e0..5b32e15a65ce 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
                },
        },
+       {
+        .callback = video_ignore_initial_backlight,
+        .ident = "HP 1000 Notebook PC",
+        .matches = {
+               DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+               },
+       },
        {}
 };
 
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 39c32529b833..5da914041305 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 int dev_pm_put_subsys_data(struct device *dev)
 {
        struct pm_subsys_data *psd;
-       int ret = 0;
+       int ret = 1;
 
        spin_lock_irq(&dev->power.lock);
 
        psd = dev_to_psd(dev);
-       if (!psd) {
-               ret = -EINVAL;
+       if (!psd)
                goto out;
-       }
 
        if (--psd->refcount == 0) {
                dev->power.subsys_data = NULL;
-               kfree(psd);
-               ret = 1;
+       } else {
+               psd = NULL;
+               ret = 0;
        }
 
  out:
        spin_unlock_irq(&dev->power.lock);
+       kfree(psd);
 
        return ret;
 }
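The rewritten dev_pm_put_subsys_data() illustrates a pattern worth calling out: decide under the spinlock whether the object dies, but perform the kfree() only after the lock is dropped, NULL-ing the local pointer when the object must survive (kfree(NULL) is a no-op). A user-space sketch of the same shape, assuming a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct subsys_data {
        int refcount;
};

static pthread_mutex_t power_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 if the object was freed (or absent), 0 if references remain. */
static int put_subsys_data(struct subsys_data **slot)
{
        struct subsys_data *psd;
        int ret = 1;

        pthread_mutex_lock(&power_lock);
        psd = *slot;
        if (!psd)
                goto out;

        if (--psd->refcount == 0) {
                *slot = NULL;           /* object is dead; freed below */
        } else {
                psd = NULL;             /* keep it; free(NULL) is a no-op */
                ret = 0;
        }
out:
        pthread_mutex_unlock(&power_lock);
        free(psd);                      /* never call the allocator under the lock */

        return ret;
}

int main(void)
{
        struct subsys_data *data = calloc(1, sizeof(*data));

        data->refcount = 2;
        printf("freed: %d\n", put_subsys_data(&data)); /* freed: 0 */
        printf("freed: %d\n", put_subsys_data(&data)); /* freed: 1 */
        return 0;
}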
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ca63104136e0..d6d314027b5d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -55,6 +55,39 @@
 #define SECTOR_SHIFT   9
 #define SECTOR_SIZE    (1ULL << SECTOR_SHIFT)
 
+/*
+ * Increment the given counter and return its updated value.
+ * If the counter is already 0 it will not be incremented.
+ * If the counter is already at its maximum value returns
+ * -EINVAL without updating it.
+ */
+static int atomic_inc_return_safe(atomic_t *v)
+{
+       unsigned int counter;
+
+       counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+       if (counter <= (unsigned int)INT_MAX)
+               return (int)counter;
+
+       atomic_dec(v);
+
+       return -EINVAL;
+}
+
+/* Decrement the counter.  Return the resulting value, or -EINVAL */
+static int atomic_dec_return_safe(atomic_t *v)
+{
+       int counter;
+
+       counter = atomic_dec_return(v);
+       if (counter >= 0)
+               return counter;
+
+       atomic_inc(v);
+
+       return -EINVAL;
+}
+
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
 
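The two helpers above give rbd a reference counter that saturates instead of wrapping: an increment never resurrects a counter that has reached 0, and both directions back out rather than overflow or underflow. A self-contained C11 sketch of those semantics (user-space analogue, with -1 standing in for -EINVAL):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int inc_return_safe(atomic_int *v)
{
        int old = atomic_load(v);

        do {
                if (old == 0)
                        return 0;       /* like __atomic_add_unless(v, 1, 0) */
                if (old == INT_MAX)
                        return -1;      /* would overflow: refuse */
        } while (!atomic_compare_exchange_weak(v, &old, old + 1));

        return old + 1;
}

static int dec_return_safe(atomic_int *v)
{
        int counter = atomic_fetch_sub(v, 1) - 1;

        if (counter >= 0)
                return counter;

        atomic_fetch_add(v, 1);         /* went negative: put it back */
        return -1;
}

int main(void)
{
        atomic_int ref = 1;

        printf("%d\n", inc_return_safe(&ref)); /* 2 */
        printf("%d\n", dec_return_safe(&ref)); /* 1 */
        printf("%d\n", dec_return_safe(&ref)); /* 0 */
        printf("%d\n", inc_return_safe(&ref)); /* 0: stays dead */
        return 0;
}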
@@ -100,21 +133,20 @@
  * block device image metadata (in-memory version)
  */
 struct rbd_image_header {
-       /* These four fields never change for a given rbd image */
+       /* These six fields never change for a given rbd image */
        char *object_prefix;
-       u64 features;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
+       u64 stripe_unit;
+       u64 stripe_count;
+       u64 features;           /* Might be changeable someday? */
 
        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
-       char *snap_names;
-       u64 *snap_sizes;
-
-       u64 stripe_unit;
-       u64 stripe_count;
+       char *snap_names;       /* format 1 only */
+       u64 *snap_sizes;        /* format 1 only */
 };
 
 /*
@@ -225,6 +257,7 @@ struct rbd_obj_request {
        };
        };
        struct page **copyup_pages;
+       u32 copyup_page_count;
 
        struct ceph_osd_request *osd_req;
 
@@ -257,6 +290,7 @@ struct rbd_img_request {
                struct rbd_obj_request *obj_request;    /* obj req initiator */
        };
        struct page **copyup_pages;
+       u32 copyup_page_count;
        spinlock_t completion_lock;     /* protects next_completion */
        u32 next_completion;
        rbd_img_callback_t callback;
@@ -311,6 +345,7 @@ struct rbd_device {
 
        struct rbd_spec *parent_spec;
        u64 parent_overlap;
+       atomic_t parent_ref;
        struct rbd_device *parent;
 
        /* protects updating the header */
@@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static void rbd_spec_put(struct rbd_spec *spec);
 
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
 }
 
 /*
- * Create a new header structure, translate header format from the on-disk
- * header.
+ * Fill an rbd image header with information from the given format 1
+ * on-disk header.
  */
-static int rbd_header_from_disk(struct rbd_image_header *header,
+static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                struct rbd_image_header_ondisk *ondisk)
 {
+       struct rbd_image_header *header = &rbd_dev->header;
+       bool first_time = header->object_prefix == NULL;
+       struct ceph_snap_context *snapc;
+       char *object_prefix = NULL;
+       char *snap_names = NULL;
+       u64 *snap_sizes = NULL;
        u32 snap_count;
-       size_t len;
        size_t size;
+       int ret = -ENOMEM;
        u32 i;
 
-       memset(header, 0, sizeof (*header));
+       /* Allocate this now to avoid having to handle failure below */
 
-       snap_count = le32_to_cpu(ondisk->snap_count);
+       if (first_time) {
+               size_t len;
 
-       len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
-       header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
-       if (!header->object_prefix)
-               return -ENOMEM;
-       memcpy(header->object_prefix, ondisk->object_prefix, len);
-       header->object_prefix[len] = '\0';
+               len = strnlen(ondisk->object_prefix,
+                               sizeof (ondisk->object_prefix));
+               object_prefix = kmalloc(len + 1, GFP_KERNEL);
+               if (!object_prefix)
+                       return -ENOMEM;
+               memcpy(object_prefix, ondisk->object_prefix, len);
+               object_prefix[len] = '\0';
+       }
 
+       /* Allocate the snapshot context and fill it in */
+
+       snap_count = le32_to_cpu(ondisk->snap_count);
+       snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
+       if (!snapc)
+               goto out_err;
+       snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
+               struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
 
-               /* Save a copy of the snapshot names */
+               /* We'll keep a copy of the snapshot names... */
 
-               if (snap_names_len > (u64) SIZE_MAX)
-                       return -EIO;
-               header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
-               if (!header->snap_names)
+               if (snap_names_len > (u64)SIZE_MAX)
+                       goto out_2big;
+               snap_names = kmalloc(snap_names_len, GFP_KERNEL);
+               if (!snap_names)
                        goto out_err;
+
+               /* ...as well as the array of their sizes. */
+
+               size = snap_count * sizeof (*header->snap_sizes);
+               snap_sizes = kmalloc(size, GFP_KERNEL);
+               if (!snap_sizes)
+                       goto out_err;
+
                /*
-                * Note that rbd_dev_v1_header_read() guarantees
-                * the ondisk buffer we're working with has
+                * Copy the names, and fill in each snapshot's id
+                * and size.
+                *
+                * Note that rbd_dev_v1_header_info() guarantees the
+                * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, this memcpy() is safe.
                 */
-               memcpy(header->snap_names, &ondisk->snaps[snap_count],
-                       snap_names_len);
+               memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
+               snaps = ondisk->snaps;
+               for (i = 0; i < snap_count; i++) {
+                       snapc->snaps[i] = le64_to_cpu(snaps[i].id);
+                       snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
+               }
+       }
 
-               /* Record each snapshot's size */
+       /* We won't fail any more, fill in the header */
 
-               size = snap_count * sizeof (*header->snap_sizes);
-               header->snap_sizes = kmalloc(size, GFP_KERNEL);
-               if (!header->snap_sizes)
-                       goto out_err;
-               for (i = 0; i < snap_count; i++)
-                       header->snap_sizes[i] =
-                               le64_to_cpu(ondisk->snaps[i].image_size);
+       down_write(&rbd_dev->header_rwsem);
+       if (first_time) {
+               header->object_prefix = object_prefix;
+               header->obj_order = ondisk->options.order;
+               header->crypt_type = ondisk->options.crypt_type;
+               header->comp_type = ondisk->options.comp_type;
+               /* The rest aren't used for format 1 images */
+               header->stripe_unit = 0;
+               header->stripe_count = 0;
+               header->features = 0;
        } else {
-               header->snap_names = NULL;
-               header->snap_sizes = NULL;
+               ceph_put_snap_context(header->snapc);
+               kfree(header->snap_names);
+               kfree(header->snap_sizes);
        }
 
-       header->features = 0;   /* No features support in v1 images */
-       header->obj_order = ondisk->options.order;
-       header->crypt_type = ondisk->options.crypt_type;
-       header->comp_type = ondisk->options.comp_type;
-
-       /* Allocate and fill in the snapshot context */
+       /* The remaining fields always get updated (when we refresh) */
 
        header->image_size = le64_to_cpu(ondisk->image_size);
+       header->snapc = snapc;
+       header->snap_names = snap_names;
+       header->snap_sizes = snap_sizes;
 
-       header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
-       if (!header->snapc)
-               goto out_err;
-       header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
-       for (i = 0; i < snap_count; i++)
-               header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
+       /* Make sure mapping size is consistent with header info */
 
-       return 0;
+       if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
+               if (rbd_dev->mapping.size != header->image_size)
+                       rbd_dev->mapping.size = header->image_size;
+
+       up_write(&rbd_dev->header_rwsem);
 
+       return 0;
+out_2big:
+       ret = -EIO;
 out_err:
-       kfree(header->snap_sizes);
-       header->snap_sizes = NULL;
-       kfree(header->snap_names);
-       header->snap_names = NULL;
-       kfree(header->object_prefix);
-       header->object_prefix = NULL;
+       kfree(snap_sizes);
+       kfree(snap_names);
+       ceph_put_snap_context(snapc);
+       kfree(object_prefix);
 
-       return -ENOMEM;
+       return ret;
 }
 
 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
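The refactored rbd_header_from_disk() is a good example of a prepare-then-commit update: every allocation that can fail happens before header_rwsem is taken, and the commit phase under the lock cannot fail, so readers never observe a half-updated header. A user-space sketch of that two-phase shape (invented names, pthread rwlock in place of the rwsem):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct header {
        char *snap_names;
        size_t snap_count;
};

static pthread_rwlock_t header_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static int header_update(struct header *h, const char *names, size_t count)
{
        char *copy;

        /* Phase 1: do all fallible work without the lock held */
        copy = strdup(names);
        if (!copy)
                return -1;

        /* Phase 2: commit under the lock; nothing here can fail */
        pthread_rwlock_wrlock(&header_rwsem);
        free(h->snap_names);
        h->snap_names = copy;
        h->snap_count = count;
        pthread_rwlock_unlock(&header_rwsem);

        return 0;
}

int main(void)
{
        struct header h = { 0 };

        header_update(&h, "snap1", 1);
        header_update(&h, "snap1 snap2", 2);
        free(h.snap_names);
        return 0;
}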
@@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 
 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
 {
-       const char *snap_name = rbd_dev->spec->snap_name;
-       u64 snap_id;
+       u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;
 
-       if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
-               snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
-               if (snap_id == CEPH_NOSNAP)
-                       return -ENOENT;
-       } else {
-               snap_id = CEPH_NOSNAP;
-       }
-
        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
@@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;
 
-       /* If we are mapping a snapshot it must be marked read-only */
-
-       if (snap_id != CEPH_NOSNAP)
-               rbd_dev->mapping.read_only = true;
-
        return 0;
 }
 
@@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
 {
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
-}
-
-static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
-{
-       rbd_dev->mapping.size = 0;
-       rbd_dev->mapping.features = 0;
-       rbd_dev->mapping.read_only = true;
 }
 
 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
 }
 
-static void rbd_img_request_get(struct rbd_img_request *img_request)
-{
-       dout("%s: img %p (was %d)\n", __func__, img_request,
-               atomic_read(&img_request->kref.refcount));
-       kref_get(&img_request->kref);
-}
-
+static bool img_request_child_test(struct rbd_img_request *img_request);
+static void rbd_parent_request_destroy(struct kref *kref);
 static void rbd_img_request_destroy(struct kref *kref);
 static void rbd_img_request_put(struct rbd_img_request *img_request)
 {
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
               atomic_read(&img_request->kref.refcount));
-       kref_put(&img_request->kref, rbd_img_request_destroy);
+       if (img_request_child_test(img_request))
+               kref_put(&img_request->kref, rbd_parent_request_destroy);
+       else
+               kref_put(&img_request->kref, rbd_img_request_destroy);
 }
 
 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
@@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_child_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_CHILD, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_child_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request)
        smp_mb();
 }
 
+static void img_request_layered_clear(struct rbd_img_request *img_request)
+{
+       clear_bit(IMG_REQ_LAYERED, &img_request->flags);
+       smp_mb();
+}
+
 static bool img_request_layered_test(struct rbd_img_request *img_request)
 {
        smp_mb();
@@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref)
        kmem_cache_free(rbd_obj_request_cache, obj_request);
 }
 
+/* It's OK to call this for a device with no parent */
+
+static void rbd_spec_put(struct rbd_spec *spec);
+static void rbd_dev_unparent(struct rbd_device *rbd_dev)
+{
+       rbd_dev_remove_parent(rbd_dev);
+       rbd_spec_put(rbd_dev->parent_spec);
+       rbd_dev->parent_spec = NULL;
+       rbd_dev->parent_overlap = 0;
+}
+
+/*
+ * Parent image reference counting is used to determine when an
+ * image's parent fields can be safely torn down--after there are no
+ * more in-flight requests to the parent image. When the last
+ * reference is dropped, cleaning them up is safe.
+ */
+static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return;
+
+       counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0)
+               return;
+
+       /* Last reference; clean up parent data structures */
+
+       if (!counter)
+               rbd_dev_unparent(rbd_dev);
+       else
+               rbd_warn(rbd_dev, "parent reference underflow\n");
+}
+
+/*
+ * If an image has a non-zero parent overlap, get a reference to its
+ * parent.
+ *
+ * We must get the reference before checking for the overlap to
+ * coordinate properly with zeroing the parent overlap in
+ * rbd_dev_v2_parent_info() when an image gets flattened. We
+ * drop it again if there is no overlap.
+ *
+ * Returns true if the rbd device has a parent with a non-zero
+ * overlap and a reference for it was successfully taken, or
+ * false otherwise.
+ */
+static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
+{
+       int counter;
+
+       if (!rbd_dev->parent_spec)
+               return false;
+
+       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       if (counter > 0 && rbd_dev->parent_overlap)
+               return true;
+
+       /* Image was flattened, but parent is not yet torn down */
+
+       if (counter < 0)
+               rbd_warn(rbd_dev, "parent reference overflow\n");
+
+       return false;
+}
+
 /*
  * Caller is responsible for filling in the list of object requests
  * that comprises the image request, and the Linux request pointer
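Taken together, rbd_dev_parent_get()/rbd_dev_parent_put() bracket every request that may touch the parent image, and the flatten race is handled by taking the reference before checking the overlap. A simplified user-space sketch of that lifecycle (it folds the saturating-counter details into a CAS loop and drops the reference itself when the image turns out to be flattened; the kernel code instead leaves the extra reference in place and warns on overflow):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int parent_ref = 1;               /* 1: parent attached, idle */
static unsigned long parent_overlap = 4096;     /* bytes still backed by parent */

static void unparent(void)
{
        printf("last reference: tearing parent down\n");
}

static void parent_put(void)
{
        if (atomic_fetch_sub(&parent_ref, 1) - 1 == 0)
                unparent();
}

static bool parent_get(void)
{
        int old = atomic_load(&parent_ref);

        do {
                if (old == 0)
                        return false;           /* parent already torn down */
        } while (!atomic_compare_exchange_weak(&parent_ref, &old, old + 1));

        if (parent_overlap)
                return true;

        parent_put();                           /* flattened meanwhile: drop it */
        return false;
}

int main(void)
{
        if (parent_get()) {                     /* around an in-flight request */
                printf("request may read from the parent\n");
                parent_put();                   /* when the request completes */
        }
        parent_put();                           /* final put at image teardown */
        return 0;
}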
@@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
 static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
-                                       bool write_request,
-                                       bool child_request)
+                                       bool write_request)
 {
        struct rbd_img_request *img_request;
 
@@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create(
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
-       if (child_request)
-               img_request_child_set(img_request);
-       if (rbd_dev->parent_spec)
+       if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
@@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create(
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);
 
-       rbd_img_request_get(img_request);       /* Avoid a warning */
-       rbd_img_request_put(img_request);       /* TEMPORARY */
-
        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
               write_request ? "write" : "read", offset, length,
               img_request);
@@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);
 
+       if (img_request_layered_test(img_request)) {
+               img_request_layered_clear(img_request);
+               rbd_dev_parent_put(img_request->rbd_dev);
+       }
+
        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);
 
-       if (img_request_child_test(img_request))
-               rbd_obj_request_put(img_request->obj_request);
-
        kmem_cache_free(rbd_img_request_cache, img_request);
 }
 
+static struct rbd_img_request *rbd_parent_request_create(
+                                       struct rbd_obj_request *obj_request,
+                                       u64 img_offset, u64 length)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_device *rbd_dev;
+
+       rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+
+       parent_request = rbd_img_request_create(rbd_dev->parent,
+                                               img_offset, length, false);
+       if (!parent_request)
+               return NULL;
+
+       img_request_child_set(parent_request);
+       rbd_obj_request_get(obj_request);
+       parent_request->obj_request = obj_request;
+
+       return parent_request;
+}
+
+static void rbd_parent_request_destroy(struct kref *kref)
+{
+       struct rbd_img_request *parent_request;
+       struct rbd_obj_request *orig_request;
+
+       parent_request = container_of(kref, struct rbd_img_request, kref);
+       orig_request = parent_request->obj_request;
+
+       parent_request->obj_request = NULL;
+       rbd_obj_request_put(orig_request);
+       img_request_child_clear(parent_request);
+
+       rbd_img_request_destroy(kref);
+}
+
 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
@@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
-       u64 length;
+       struct page **pages;
        u32 page_count;
 
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
@@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
 
        rbd_dev = img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       length = (u64)1 << rbd_dev->header.obj_order;
-       page_count = (u32)calc_pages_for(0, length);
 
-       rbd_assert(obj_request->copyup_pages);
-       ceph_release_page_vector(obj_request->copyup_pages, page_count);
+       pages = obj_request->copyup_pages;
+       rbd_assert(pages != NULL);
        obj_request->copyup_pages = NULL;
+       page_count = obj_request->copyup_page_count;
+       rbd_assert(page_count);
+       obj_request->copyup_page_count = 0;
+       ceph_release_page_vector(pages, page_count);
 
        /*
         * We want the transfer count to reflect the size of the
@@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        struct ceph_osd_client *osdc;
        struct rbd_device *rbd_dev;
        struct page **pages;
-       int result;
-       u64 obj_size;
-       u64 xferred;
+       u32 page_count;
+       int img_result;
+       u64 parent_length;
+       u64 offset;
+       u64 length;
 
        rbd_assert(img_request_child_test(img_request));
 
@@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
        pages = img_request->copyup_pages;
        rbd_assert(pages != NULL);
        img_request->copyup_pages = NULL;
+       page_count = img_request->copyup_page_count;
+       rbd_assert(page_count);
+       img_request->copyup_page_count = 0;
 
        orig_request = img_request->obj_request;
        rbd_assert(orig_request != NULL);
-       rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
-       result = img_request->result;
-       obj_size = img_request->length;
-       xferred = img_request->xferred;
+       rbd_assert(obj_request_type_valid(orig_request->type));
+       img_result = img_request->result;
+       parent_length = img_request->length;
+       rbd_assert(parent_length == img_request->xferred);
+       rbd_img_request_put(img_request);
 
-       rbd_dev = img_request->rbd_dev;
+       rbd_assert(orig_request->img_request);
+       rbd_dev = orig_request->img_request->rbd_dev;
        rbd_assert(rbd_dev);
-       rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
 
-       rbd_img_request_put(img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
 
-       if (result)
-               goto out_err;
+               ceph_release_page_vector(pages, page_count);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, orig_request);
+               if (!img_result)
+                       return;
+       }
 
-       /* Allocate the new copyup osd request for the original request */
+       if (img_result)
+               goto out_err;
 
-       result = -ENOMEM;
-       rbd_assert(!orig_request->osd_req);
+       /*
+        * The original osd request is of no use to use any more.
+        * We need a new one that can hold the two ops in a copyup
+        * request. Allocate the new copyup osd request for the
+        * original request, and release the old one.
+        */
+       img_result = -ENOMEM;
        osd_req = rbd_osd_req_create_copyup(orig_request);
        if (!osd_req)
                goto out_err;
+       rbd_osd_req_destroy(orig_request->osd_req);
        orig_request->osd_req = osd_req;
        orig_request->copyup_pages = pages;
+       orig_request->copyup_page_count = page_count;
 
        /* Initialize the copyup op */
 
        osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
-       osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
+       osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
                                                false, false);
 
        /* Then the original write request op */
 
+       offset = orig_request->offset;
+       length = orig_request->length;
        osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
-                                       orig_request->offset,
-                                       orig_request->length, 0, 0);
-       osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
-                                       orig_request->length);
+                                       offset, length, 0, 0);
+       if (orig_request->type == OBJ_REQUEST_BIO)
+               osd_req_op_extent_osd_data_bio(osd_req, 1,
+                                       orig_request->bio_list, length);
+       else
+               osd_req_op_extent_osd_data_pages(osd_req, 1,
+                                       orig_request->pages, length,
+                                       offset & ~PAGE_MASK, false, false);
 
        rbd_osd_req_format_write(orig_request);
 
@@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
-       result = rbd_obj_request_submit(osdc, orig_request);
-       if (!result)
+       img_result = rbd_obj_request_submit(osdc, orig_request);
+       if (!img_result)
                return;
 out_err:
        /* Record the error code and complete the request */
 
-       orig_request->result = result;
+       orig_request->result = img_result;
        orig_request->xferred = 0;
        obj_request_done_set(orig_request);
        rbd_obj_request_complete(orig_request);
@@ -2249,7 +2442,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
        img_request = obj_request->img_request;
        rbd_assert(img_request != NULL);
@@ -2257,15 +2450,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        rbd_assert(rbd_dev->parent != NULL);
 
        /*
-        * First things first. The original osd request is of no
-        * use to use any more, we'll need a new one that can hold
-        * the two ops in a copyup request. We'll get that later,
-        * but for now we can release the old one.
-        */
-       rbd_osd_req_destroy(obj_request->osd_req);
-       obj_request->osd_req = NULL;
-
-       /*
         * Determine the byte range covered by the object in the
        * child image to which the original request was to be sent.
        */
@@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        }
 
        result = -ENOMEM;
-       parent_request = rbd_img_request_create(rbd_dev->parent,
-                                               img_offset, length,
-                                               false, true);
+       parent_request = rbd_parent_request_create(obj_request,
+                                               img_offset, length);
        if (!parent_request)
                goto out_err;
-       rbd_obj_request_get(obj_request);
-       parent_request->obj_request = obj_request;
 
        result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
        if (result)
                goto out_err;
        parent_request->copyup_pages = pages;
+       parent_request->copyup_page_count = page_count;
 
        parent_request->callback = rbd_img_obj_parent_read_full_callback;
        result = rbd_img_request_submit(parent_request);
@@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
        return 0;
 
        parent_request->copyup_pages = NULL;
+       parent_request->copyup_page_count = 0;
        parent_request->obj_request = NULL;
        rbd_obj_request_put(obj_request);
 out_err:
@@ -2331,6 +2514,7 @@ out_err:
 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_obj_request *orig_request;
+       struct rbd_device *rbd_dev;
        int result;
 
        rbd_assert(!obj_request_img_data_test(obj_request));
@@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
                obj_request->xferred, obj_request->length);
        rbd_obj_request_put(obj_request);
 
-       rbd_assert(orig_request);
-       rbd_assert(orig_request->img_request);
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to free the pages
+        * and re-submit the original write request.
+        */
+       rbd_dev = orig_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
+
+               rbd_obj_request_put(orig_request);
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               result = rbd_obj_request_submit(osdc, orig_request);
+               if (!result)
+                       return;
+       }
 
        /*
         * Our only purpose here is to determine whether the object
@@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
        struct rbd_obj_request *obj_request;
        struct rbd_device *rbd_dev;
        u64 obj_end;
+       u64 img_xferred;
+       int img_result;
 
        rbd_assert(img_request_child_test(img_request));
 
+       /* First get what we need from the image request and release it */
+
        obj_request = img_request->obj_request;
+       img_xferred = img_request->xferred;
+       img_result = img_request->result;
+       rbd_img_request_put(img_request);
+
+       /*
+        * If the overlap has become 0 (most likely because the
+        * image has been flattened) we need to re-submit the
+        * original request.
+        */
        rbd_assert(obj_request);
        rbd_assert(obj_request->img_request);
+       rbd_dev = obj_request->img_request->rbd_dev;
+       if (!rbd_dev->parent_overlap) {
+               struct ceph_osd_client *osdc;
+
+               osdc = &rbd_dev->rbd_client->client->osdc;
+               img_result = rbd_obj_request_submit(osdc, obj_request);
+               if (!img_result)
+                       return;
+       }
 
-       obj_request->result = img_request->result;
+       obj_request->result = img_result;
        if (obj_request->result)
                goto out;
 
@@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
         */
        rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
        obj_end = obj_request->img_offset + obj_request->length;
-       rbd_dev = obj_request->img_request->rbd_dev;
        if (obj_end > rbd_dev->parent_overlap) {
                u64 xferred = 0;
 
@@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
                        xferred = rbd_dev->parent_overlap -
                                        obj_request->img_offset;
 
-               obj_request->xferred = min(img_request->xferred, xferred);
+               obj_request->xferred = min(img_xferred, xferred);
        } else {
-               obj_request->xferred = img_request->xferred;
+               obj_request->xferred = img_xferred;
        }
 out:
-       rbd_img_request_put(img_request);
        rbd_img_obj_request_read_callback(obj_request);
        rbd_obj_request_complete(obj_request);
 }
 
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
 {
-       struct rbd_device *rbd_dev;
        struct rbd_img_request *img_request;
        int result;
 
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request != NULL);
        rbd_assert(obj_request->result == (s32) -ENOENT);
-       rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
+       rbd_assert(obj_request_type_valid(obj_request->type));
 
-       rbd_dev = obj_request->img_request->rbd_dev;
-       rbd_assert(rbd_dev->parent != NULL);
        /* rbd_read_finish(obj_request, obj_request->length); */
-       img_request = rbd_img_request_create(rbd_dev->parent,
+       img_request = rbd_parent_request_create(obj_request,
                                                obj_request->img_offset,
-                                               obj_request->length,
-                                               false, true);
+                                               obj_request->length);
        result = -ENOMEM;
        if (!img_request)
                goto out_err;
 
-       rbd_obj_request_get(obj_request);
-       img_request->obj_request = obj_request;
-
-       result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
-                                       obj_request->bio_list);
+       if (obj_request->type == OBJ_REQUEST_BIO)
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
+                                       obj_request->bio_list);
+       else
+               result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
+                                       obj_request->pages);
        if (result)
                goto out_err;
 
@@ -2626,6 +2840,7 @@ out:
2626static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 2840static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2627{ 2841{
2628 struct rbd_device *rbd_dev = (struct rbd_device *)data; 2842 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2843 int ret;
2629 2844
2630 if (!rbd_dev) 2845 if (!rbd_dev)
2631 return; 2846 return;
@@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2633 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 2848 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2634 rbd_dev->header_name, (unsigned long long)notify_id, 2849 rbd_dev->header_name, (unsigned long long)notify_id,
2635 (unsigned int)opcode); 2850 (unsigned int)opcode);
2636 (void)rbd_dev_refresh(rbd_dev); 2851 ret = rbd_dev_refresh(rbd_dev);
2852 if (ret)
2853 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2637 2854
2638 rbd_obj_notify_ack(rbd_dev, notify_id); 2855 rbd_obj_notify_ack(rbd_dev, notify_id);
2639} 2856}
@@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2642 * Request sync osd watch/unwatch. The value of "start" determines 2859 * Request sync osd watch/unwatch. The value of "start" determines
2643 * whether a watch request is being initiated or torn down. 2860 * whether a watch request is being initiated or torn down.
2644 */ 2861 */
2645static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) 2862static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2646{ 2863{
2647 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2864 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2648 struct rbd_obj_request *obj_request; 2865 struct rbd_obj_request *obj_request;
@@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2676 rbd_dev->watch_request->osd_req); 2893 rbd_dev->watch_request->osd_req);
2677 2894
2678 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, 2895 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2679 rbd_dev->watch_event->cookie, 0, start); 2896 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2680 rbd_osd_req_format_write(obj_request); 2897 rbd_osd_req_format_write(obj_request);
2681 2898
2682 ret = rbd_obj_request_submit(osdc, obj_request); 2899 ret = rbd_obj_request_submit(osdc, obj_request);
@@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q)
2869 goto end_request; /* Shouldn't happen */ 3086 goto end_request; /* Shouldn't happen */
2870 } 3087 }
2871 3088
3089 result = -EIO;
3090 if (offset + length > rbd_dev->mapping.size) {
3091 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3092 offset, length, rbd_dev->mapping.size);
3093 goto end_request;
3094 }
3095
2872 result = -ENOMEM; 3096 result = -ENOMEM;
2873 img_request = rbd_img_request_create(rbd_dev, offset, length, 3097 img_request = rbd_img_request_create(rbd_dev, offset, length,
2874 write_request, false); 3098 write_request);
2875 if (!img_request) 3099 if (!img_request)
2876 goto end_request; 3100 goto end_request;
2877 3101
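The new -EIO branch rejects a request that runs past the mapped image size before any allocation is attempted. The same bounds test in self-contained C (u64 arithmetic as in the hunk; the sizes here are made up):

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Refuse I/O beyond end-of-device, mirroring the check in the hunk. */
static int check_request(uint64_t offset, uint64_t length, uint64_t dev_size)
{
        if (offset + length > dev_size) {
                fprintf(stderr, "beyond EOD (%" PRIu64 "~%" PRIu64 " > %" PRIu64 ")\n",
                        offset, length, dev_size);
                return -EIO;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_request(0, 4096, 8192));    /* in range: 0 */
        printf("%d\n", check_request(8000, 4096, 8192)); /* past EOD: -5 */
        return 0;
}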
@@ -3022,17 +3246,11 @@ out:
3022} 3246}
3023 3247
3024/* 3248/*
3025 * Read the complete header for the given rbd device. 3249 * Read the complete header for the given rbd device. On successful
3026 * 3250 * return, the rbd_dev->header field will contain up-to-date
3027 * Returns a pointer to a dynamically-allocated buffer containing 3251 * information about the image.
3028 * the complete and validated header. Caller can pass the address
3029 * of a variable that will be filled in with the version of the
3030 * header object at the time it was read.
3031 *
3032 * Returns a pointer-coded errno if a failure occurs.
3033 */ 3252 */
3034static struct rbd_image_header_ondisk * 3253static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3035rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3036{ 3254{
3037 struct rbd_image_header_ondisk *ondisk = NULL; 3255 struct rbd_image_header_ondisk *ondisk = NULL;
3038 u32 snap_count = 0; 3256 u32 snap_count = 0;
@@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3057 size += names_size; 3275 size += names_size;
3058 ondisk = kmalloc(size, GFP_KERNEL); 3276 ondisk = kmalloc(size, GFP_KERNEL);
3059 if (!ondisk) 3277 if (!ondisk)
3060 return ERR_PTR(-ENOMEM); 3278 return -ENOMEM;
3061 3279
3062 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 3280 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3063 0, size, ondisk); 3281 0, size, ondisk);
3064 if (ret < 0) 3282 if (ret < 0)
3065 goto out_err; 3283 goto out;
3066 if ((size_t)ret < size) { 3284 if ((size_t)ret < size) {
3067 ret = -ENXIO; 3285 ret = -ENXIO;
3068 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 3286 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3069 size, ret); 3287 size, ret);
3070 goto out_err; 3288 goto out;
3071 } 3289 }
3072 if (!rbd_dev_ondisk_valid(ondisk)) { 3290 if (!rbd_dev_ondisk_valid(ondisk)) {
3073 ret = -ENXIO; 3291 ret = -ENXIO;
3074 rbd_warn(rbd_dev, "invalid header"); 3292 rbd_warn(rbd_dev, "invalid header");
3075 goto out_err; 3293 goto out;
3076 } 3294 }
3077 3295
3078 names_size = le64_to_cpu(ondisk->snap_names_len); 3296 names_size = le64_to_cpu(ondisk->snap_names_len);
@@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3080 snap_count = le32_to_cpu(ondisk->snap_count); 3298 snap_count = le32_to_cpu(ondisk->snap_count);
3081 } while (snap_count != want_count); 3299 } while (snap_count != want_count);
3082 3300
3083 return ondisk; 3301 ret = rbd_header_from_disk(rbd_dev, ondisk);
3084 3302out:
3085out_err:
3086 kfree(ondisk);
3087
3088 return ERR_PTR(ret);
3089}
3090
3091/*
3092 * reload the on-disk header
3093 */
3094static int rbd_read_header(struct rbd_device *rbd_dev,
3095 struct rbd_image_header *header)
3096{
3097 struct rbd_image_header_ondisk *ondisk;
3098 int ret;
3099
3100 ondisk = rbd_dev_v1_header_read(rbd_dev);
3101 if (IS_ERR(ondisk))
3102 return PTR_ERR(ondisk);
3103 ret = rbd_header_from_disk(header, ondisk);
3104 kfree(ondisk); 3303 kfree(ondisk);
3105 3304
3106 return ret; 3305 return ret;
3107} 3306}
3108 3307
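The refactor folds the pointer-coded-errno helper and its only caller into one int-returning function with a single exit label. A userspace sketch contrasting the two calling conventions, with minimal stand-ins for the kernel's ERR_PTR machinery (the header contents are invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for the kernel's pointer-coded errnos. */
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((int)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-4095)

struct header { int size; };

/* Old shape: return the buffer, or a pointer-coded errno. */
static struct header *header_read_old(void)
{
        struct header *h = malloc(sizeof(*h));

        if (!h)
                return ERR_PTR(-ENOMEM);
        h->size = 4096;
        return h;
}

/* New shape: return an int, fill caller-owned state, and free the
 * temporary buffer on every path through one exit label. */
static int header_info_new(struct header *out)
{
        struct header *ondisk = malloc(sizeof(*ondisk));

        if (!ondisk)
                return -ENOMEM;
        ondisk->size = 4096;
        *out = *ondisk;         /* the "header from disk" step */
        free(ondisk);
        return 0;
}

int main(void)
{
        struct header h;
        struct header *p = header_read_old();
        int ret;

        if (IS_ERR(p))
                return -PTR_ERR(p);
        free(p);
        ret = header_info_new(&h);
        printf("new-style ret=%d size=%d\n", ret, h.size);
        return 0;
}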
3109static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3110{
3111 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3112 return;
3113
3114 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3115 sector_t size;
3116
3117 rbd_dev->mapping.size = rbd_dev->header.image_size;
3118 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3119 dout("setting size to %llu sectors", (unsigned long long)size);
3120 set_capacity(rbd_dev->disk, size);
3121 }
3122}
3123
3124/*
3125 * only read the first part of the ondisk header, without the snaps info
3126 */
3127static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3128{
3129 int ret;
3130 struct rbd_image_header h;
3131
3132 ret = rbd_read_header(rbd_dev, &h);
3133 if (ret < 0)
3134 return ret;
3135
3136 down_write(&rbd_dev->header_rwsem);
3137
3138 /* Update image size, and check for resize of mapped image */
3139 rbd_dev->header.image_size = h.image_size;
3140 rbd_update_mapping_size(rbd_dev);
3141
3142 /* rbd_dev->header.object_prefix shouldn't change */
3143 kfree(rbd_dev->header.snap_sizes);
3144 kfree(rbd_dev->header.snap_names);
3145 /* osd requests may still refer to snapc */
3146 ceph_put_snap_context(rbd_dev->header.snapc);
3147
3148 rbd_dev->header.image_size = h.image_size;
3149 rbd_dev->header.snapc = h.snapc;
3150 rbd_dev->header.snap_names = h.snap_names;
3151 rbd_dev->header.snap_sizes = h.snap_sizes;
3152 /* Free the extra copy of the object prefix */
3153 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3154 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3155 kfree(h.object_prefix);
3156
3157 up_write(&rbd_dev->header_rwsem);
3158
3159 return ret;
3160}
3161
3162/* 3308/*
3163 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 3309 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3164 * has disappeared from the (just updated) snapshot context. 3310 * has disappeared from the (just updated) snapshot context.
@@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3180 3326
3181static int rbd_dev_refresh(struct rbd_device *rbd_dev) 3327static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3182{ 3328{
3183 u64 image_size; 3329 u64 mapping_size;
3184 int ret; 3330 int ret;
3185 3331
3186 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 3332 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3187 image_size = rbd_dev->header.image_size; 3333 mapping_size = rbd_dev->mapping.size;
3188 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 3334 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3189 if (rbd_dev->image_format == 1) 3335 if (rbd_dev->image_format == 1)
3190 ret = rbd_dev_v1_refresh(rbd_dev); 3336 ret = rbd_dev_v1_header_info(rbd_dev);
3191 else 3337 else
3192 ret = rbd_dev_v2_refresh(rbd_dev); 3338 ret = rbd_dev_v2_header_info(rbd_dev);
3193 3339
3194 /* If it's a mapped snapshot, validate its EXISTS flag */ 3340 /* If it's a mapped snapshot, validate its EXISTS flag */
3195 3341
3196 rbd_exists_validate(rbd_dev); 3342 rbd_exists_validate(rbd_dev);
3197 mutex_unlock(&ctl_mutex); 3343 mutex_unlock(&ctl_mutex);
3198 if (ret) 3344 if (mapping_size != rbd_dev->mapping.size) {
3199 rbd_warn(rbd_dev, "got notification but failed to " 3345 sector_t size;
3200 " update snaps: %d\n", ret); 3346
3201 if (image_size != rbd_dev->header.image_size) 3347 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3348 dout("setting size to %llu sectors", (unsigned long long)size);
3349 set_capacity(rbd_dev->disk, size);
3202 revalidate_disk(rbd_dev->disk); 3350 revalidate_disk(rbd_dev->disk);
3351 }
3203 3352
3204 return ret; 3353 return ret;
3205} 3354}
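rbd_dev_refresh() now snapshots mapping.size before re-reading the header and touches block-layer state only when the size actually changed. A sketch of that compare-and-resize flow, with stubs standing in for the kernel's set_capacity()/revalidate_disk():

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

static uint64_t mapping_size = 8ULL << 20;      /* 8 MiB mapping */

/* Stub header re-read: pretend the image was grown to 16 MiB. */
static int header_info(void)
{
        mapping_size = 16ULL << 20;
        return 0;
}

/* Stubs for the block-layer calls used in the hunk. */
static void set_capacity(uint64_t sectors)
{
        printf("setting size to %llu sectors\n", (unsigned long long)sectors);
}

static void revalidate_disk(void)
{
        printf("revalidated\n");
}

static int dev_refresh(void)
{
        uint64_t old_size = mapping_size;       /* snapshot before refresh */
        int ret = header_info();

        if (old_size != mapping_size) {         /* resize only on change */
                set_capacity(mapping_size / SECTOR_SIZE);
                revalidate_disk();
        }
        return ret;
}

int main(void)
{
        return dev_refresh();
}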
@@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev,
3403 int ret; 3552 int ret;
3404 3553
3405 ret = rbd_dev_refresh(rbd_dev); 3554 ret = rbd_dev_refresh(rbd_dev);
3555 if (ret)
3556 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3406 3557
3407 return ret < 0 ? ret : size; 3558 return ret < 0 ? ret : size;
3408} 3559}
@@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3501 3652
3502 spin_lock_init(&rbd_dev->lock); 3653 spin_lock_init(&rbd_dev->lock);
3503 rbd_dev->flags = 0; 3654 rbd_dev->flags = 0;
3655 atomic_set(&rbd_dev->parent_ref, 0);
3504 INIT_LIST_HEAD(&rbd_dev->node); 3656 INIT_LIST_HEAD(&rbd_dev->node);
3505 init_rwsem(&rbd_dev->header_rwsem); 3657 init_rwsem(&rbd_dev->header_rwsem);
3506 3658
@@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3650 __le64 snapid; 3802 __le64 snapid;
3651 void *p; 3803 void *p;
3652 void *end; 3804 void *end;
3805 u64 pool_id;
3653 char *image_id; 3806 char *image_id;
3654 u64 overlap; 3807 u64 overlap;
3655 int ret; 3808 int ret;
@@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3680 p = reply_buf; 3833 p = reply_buf;
3681 end = reply_buf + ret; 3834 end = reply_buf + ret;
3682 ret = -ERANGE; 3835 ret = -ERANGE;
3683 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); 3836 ceph_decode_64_safe(&p, end, pool_id, out_err);
3684 if (parent_spec->pool_id == CEPH_NOPOOL) 3837 if (pool_id == CEPH_NOPOOL) {
3838 /*
 3839 * Either the parent never existed, or we have a
 3840 * record of it, but the image got flattened so it no
3841 * longer has a parent. When the parent of a
3842 * layered image disappears we immediately set the
3843 * overlap to 0. The effect of this is that all new
3844 * requests will be treated as if the image had no
3845 * parent.
3846 */
3847 if (rbd_dev->parent_overlap) {
3848 rbd_dev->parent_overlap = 0;
3849 smp_mb();
3850 rbd_dev_parent_put(rbd_dev);
3851 pr_info("%s: clone image has been flattened\n",
3852 rbd_dev->disk->disk_name);
3853 }
3854
3685 goto out; /* No parent? No problem. */ 3855 goto out; /* No parent? No problem. */
3856 }
3686 3857
3687 /* The ceph file layout needs to fit pool id in 32 bits */ 3858 /* The ceph file layout needs to fit pool id in 32 bits */
3688 3859
3689 ret = -EIO; 3860 ret = -EIO;
3690 if (parent_spec->pool_id > (u64)U32_MAX) { 3861 if (pool_id > (u64)U32_MAX) {
3691 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", 3862 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3692 (unsigned long long)parent_spec->pool_id, U32_MAX); 3863 (unsigned long long)pool_id, U32_MAX);
3693 goto out_err; 3864 goto out_err;
3694 } 3865 }
3866 parent_spec->pool_id = pool_id;
3695 3867
3696 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 3868 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3697 if (IS_ERR(image_id)) { 3869 if (IS_ERR(image_id)) {
@@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3702 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); 3874 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3703 ceph_decode_64_safe(&p, end, overlap, out_err); 3875 ceph_decode_64_safe(&p, end, overlap, out_err);
3704 3876
3705 rbd_dev->parent_overlap = overlap; 3877 if (overlap) {
3706 rbd_dev->parent_spec = parent_spec; 3878 rbd_spec_put(rbd_dev->parent_spec);
3707 parent_spec = NULL; /* rbd_dev now owns this */ 3879 rbd_dev->parent_spec = parent_spec;
3880 parent_spec = NULL; /* rbd_dev now owns this */
3881 rbd_dev->parent_overlap = overlap;
3882 } else {
3883 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3884 }
3708out: 3885out:
3709 ret = 0; 3886 ret = 0;
3710out_err: 3887out_err:
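When the parent query comes back with CEPH_NOPOOL (or a zero overlap), the clone has been flattened and the probe-time parent reference must be dropped exactly once; zeroing parent_overlap first is what makes later refreshes a no-op. A single-threaded refcount sketch of that one-shot drop (the smp_mb() ordering against in-flight requests is deliberately out of scope here):

#include <stdio.h>

static int parent_refcount = 1;         /* taken when the clone was probed */
static unsigned long long parent_overlap = 4096;

static void parent_put(void)
{
        if (--parent_refcount == 0)
                printf("parent released\n");
}

/* Run on every header refresh; a flattened clone reports no parent. */
static void on_parent_info(int has_parent)
{
        if (!has_parent && parent_overlap) {
                parent_overlap = 0;     /* new I/O now ignores the parent */
                parent_put();           /* drop the probe-time ref once */
                printf("clone image has been flattened\n");
        }
}

int main(void)
{
        on_parent_info(0);      /* first refresh after flatten: drops ref */
        on_parent_info(0);      /* later refreshes: overlap already 0 */
        return 0;
}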
@@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4002 for (i = 0; i < snap_count; i++) 4179 for (i = 0; i < snap_count; i++)
4003 snapc->snaps[i] = ceph_decode_64(&p); 4180 snapc->snaps[i] = ceph_decode_64(&p);
4004 4181
4182 ceph_put_snap_context(rbd_dev->header.snapc);
4005 rbd_dev->header.snapc = snapc; 4183 rbd_dev->header.snapc = snapc;
4006 4184
4007 dout(" snap context seq = %llu, snap_count = %u\n", 4185 dout(" snap context seq = %llu, snap_count = %u\n",
@@ -4053,21 +4231,56 @@ out:
4053 return snap_name; 4231 return snap_name;
4054} 4232}
4055 4233
4056static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) 4234static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4057{ 4235{
4236 bool first_time = rbd_dev->header.object_prefix == NULL;
4058 int ret; 4237 int ret;
4059 4238
4060 down_write(&rbd_dev->header_rwsem); 4239 down_write(&rbd_dev->header_rwsem);
4061 4240
4241 if (first_time) {
4242 ret = rbd_dev_v2_header_onetime(rbd_dev);
4243 if (ret)
4244 goto out;
4245 }
4246
4247 /*
4248 * If the image supports layering, get the parent info. We
4249 * need to probe the first time regardless. Thereafter we
4250 * only need to if there's a parent, to see if it has
4251 * disappeared due to the mapped image getting flattened.
4252 */
4253 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4254 (first_time || rbd_dev->parent_spec)) {
4255 bool warn;
4256
4257 ret = rbd_dev_v2_parent_info(rbd_dev);
4258 if (ret)
4259 goto out;
4260
4261 /*
4262 * Print a warning if this is the initial probe and
4263 * the image has a parent. Don't print it if the
4264 * image now being probed is itself a parent. We
4265 * can tell at this point because we won't know its
4266 * pool name yet (just its pool id).
4267 */
4268 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4269 if (first_time && warn)
4270 rbd_warn(rbd_dev, "WARNING: kernel layering "
4271 "is EXPERIMENTAL!");
4272 }
4273
4062 ret = rbd_dev_v2_image_size(rbd_dev); 4274 ret = rbd_dev_v2_image_size(rbd_dev);
4063 if (ret) 4275 if (ret)
4064 goto out; 4276 goto out;
4065 rbd_update_mapping_size(rbd_dev); 4277
4278 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4279 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4280 rbd_dev->mapping.size = rbd_dev->header.image_size;
4066 4281
4067 ret = rbd_dev_v2_snap_context(rbd_dev); 4282 ret = rbd_dev_v2_snap_context(rbd_dev);
4068 dout("rbd_dev_v2_snap_context returned %d\n", ret); 4283 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4069 if (ret)
4070 goto out;
4071out: 4284out:
4072 up_write(&rbd_dev->header_rwsem); 4285 up_write(&rbd_dev->header_rwsem);
4073 4286
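rbd_dev_v2_header_info() keys its one-time work off a field that is NULL only before the first successful probe, so initial probe and later refreshes share one function. The sentinel pattern in miniature (the prefix value is invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *object_prefix;     /* NULL until the first probe succeeds */

static int header_onetime(void)
{
        object_prefix = strdup("rbd_data.1234");        /* made-up value */
        return object_prefix ? 0 : -1;
}

static int header_info(void)
{
        bool first_time = object_prefix == NULL;        /* the sentinel */

        if (first_time) {
                int ret = header_onetime();

                if (ret)
                        return ret;
                printf("one-time setup done\n");
        }
        printf("refreshed (first_time=%d)\n", (int)first_time);
        return 0;
}

int main(void)
{
        header_info();          /* runs the one-time path */
        header_info();          /* skips it */
        free(object_prefix);
        return 0;
}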
@@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4490{ 4703{
4491 struct rbd_image_header *header; 4704 struct rbd_image_header *header;
4492 4705
4493 rbd_dev_remove_parent(rbd_dev); 4706 /* Drop parent reference unless it's already been done (or none) */
4494 rbd_spec_put(rbd_dev->parent_spec); 4707
4495 rbd_dev->parent_spec = NULL; 4708 if (rbd_dev->parent_overlap)
4496 rbd_dev->parent_overlap = 0; 4709 rbd_dev_parent_put(rbd_dev);
4497 4710
4498 /* Free dynamic fields from the header, then zero it out */ 4711 /* Free dynamic fields from the header, then zero it out */
4499 4712
@@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4505 memset(header, 0, sizeof (*header)); 4718 memset(header, 0, sizeof (*header));
4506} 4719}
4507 4720
4508static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) 4721static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4509{ 4722{
4510 int ret; 4723 int ret;
4511 4724
4512 /* Populate rbd image metadata */
4513
4514 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4515 if (ret < 0)
4516 goto out_err;
4517
4518 /* Version 1 images have no parent (no layering) */
4519
4520 rbd_dev->parent_spec = NULL;
4521 rbd_dev->parent_overlap = 0;
4522
4523 dout("discovered version 1 image, header name is %s\n",
4524 rbd_dev->header_name);
4525
4526 return 0;
4527
4528out_err:
4529 kfree(rbd_dev->header_name);
4530 rbd_dev->header_name = NULL;
4531 kfree(rbd_dev->spec->image_id);
4532 rbd_dev->spec->image_id = NULL;
4533
4534 return ret;
4535}
4536
4537static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4538{
4539 int ret;
4540
4541 ret = rbd_dev_v2_image_size(rbd_dev);
4542 if (ret)
4543 goto out_err;
4544
4545 /* Get the object prefix (a.k.a. block_name) for the image */
4546
4547 ret = rbd_dev_v2_object_prefix(rbd_dev); 4725 ret = rbd_dev_v2_object_prefix(rbd_dev);
4548 if (ret) 4726 if (ret)
4549 goto out_err; 4727 goto out_err;
4550 4728
4551 /* Get and check the features for the image */ 4729 /*
4552 4730 * Get and check the features for the image. Currently the
4731 * features are assumed to never change.
4732 */
4553 ret = rbd_dev_v2_features(rbd_dev); 4733 ret = rbd_dev_v2_features(rbd_dev);
4554 if (ret) 4734 if (ret)
4555 goto out_err; 4735 goto out_err;
4556 4736
4557 /* If the image supports layering, get the parent info */
4558
4559 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4560 ret = rbd_dev_v2_parent_info(rbd_dev);
4561 if (ret)
4562 goto out_err;
4563
4564 /*
4565 * Don't print a warning for parent images. We can
4566 * tell at this point because we won't know its pool
4567 * name yet (just its pool id).
4568 */
4569 if (rbd_dev->spec->pool_name)
4570 rbd_warn(rbd_dev, "WARNING: kernel layering "
4571 "is EXPERIMENTAL!");
4572 }
4573
4574 /* If the image supports fancy striping, get its parameters */ 4737 /* If the image supports fancy striping, get its parameters */
4575 4738
4576 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 4739 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
@@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4578 if (ret < 0) 4741 if (ret < 0)
4579 goto out_err; 4742 goto out_err;
4580 } 4743 }
4581 4744 /* No support for crypto and compression in format 2 images */
4582 /* crypto and compression type aren't (yet) supported for v2 images */
4583
4584 rbd_dev->header.crypt_type = 0;
4585 rbd_dev->header.comp_type = 0;
4586
4587 /* Get the snapshot context, plus the header version */
4588
4589 ret = rbd_dev_v2_snap_context(rbd_dev);
4590 if (ret)
4591 goto out_err;
4592
4593 dout("discovered version 2 image, header name is %s\n",
4594 rbd_dev->header_name);
4595 4745
4596 return 0; 4746 return 0;
4597out_err: 4747out_err:
4598 rbd_dev->parent_overlap = 0; 4748 rbd_dev->header.features = 0;
4599 rbd_spec_put(rbd_dev->parent_spec);
4600 rbd_dev->parent_spec = NULL;
4601 kfree(rbd_dev->header_name);
4602 rbd_dev->header_name = NULL;
4603 kfree(rbd_dev->header.object_prefix); 4749 kfree(rbd_dev->header.object_prefix);
4604 rbd_dev->header.object_prefix = NULL; 4750 rbd_dev->header.object_prefix = NULL;
4605 4751
@@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4628 if (!parent) 4774 if (!parent)
4629 goto out_err; 4775 goto out_err;
4630 4776
4631 ret = rbd_dev_image_probe(parent); 4777 ret = rbd_dev_image_probe(parent, false);
4632 if (ret < 0) 4778 if (ret < 0)
4633 goto out_err; 4779 goto out_err;
4634 rbd_dev->parent = parent; 4780 rbd_dev->parent = parent;
4781 atomic_set(&rbd_dev->parent_ref, 1);
4635 4782
4636 return 0; 4783 return 0;
4637out_err: 4784out_err:
4638 if (parent) { 4785 if (parent) {
4639 rbd_spec_put(rbd_dev->parent_spec); 4786 rbd_dev_unparent(rbd_dev);
4640 kfree(rbd_dev->header_name); 4787 kfree(rbd_dev->header_name);
4641 rbd_dev_destroy(parent); 4788 rbd_dev_destroy(parent);
4642 } else { 4789 } else {
@@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4651{ 4798{
4652 int ret; 4799 int ret;
4653 4800
4654 ret = rbd_dev_mapping_set(rbd_dev);
4655 if (ret)
4656 return ret;
4657
4658 /* generate unique id: find highest unique id, add one */ 4801 /* generate unique id: find highest unique id, add one */
4659 rbd_dev_id_get(rbd_dev); 4802 rbd_dev_id_get(rbd_dev);
4660 4803
@@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4676 if (ret) 4819 if (ret)
4677 goto err_out_blkdev; 4820 goto err_out_blkdev;
4678 4821
4679 ret = rbd_bus_add_dev(rbd_dev); 4822 ret = rbd_dev_mapping_set(rbd_dev);
4680 if (ret) 4823 if (ret)
4681 goto err_out_disk; 4824 goto err_out_disk;
4825 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4826
4827 ret = rbd_bus_add_dev(rbd_dev);
4828 if (ret)
4829 goto err_out_mapping;
4682 4830
4683 /* Everything's ready. Announce the disk to the world. */ 4831 /* Everything's ready. Announce the disk to the world. */
4684 4832
4685 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4686 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4833 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4687 add_disk(rbd_dev->disk); 4834 add_disk(rbd_dev->disk);
4688 4835
@@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4691 4838
4692 return ret; 4839 return ret;
4693 4840
4841err_out_mapping:
4842 rbd_dev_mapping_clear(rbd_dev);
4694err_out_disk: 4843err_out_disk:
4695 rbd_free_disk(rbd_dev); 4844 rbd_free_disk(rbd_dev);
4696err_out_blkdev: 4845err_out_blkdev:
@@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4731 4880
4732static void rbd_dev_image_release(struct rbd_device *rbd_dev) 4881static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4733{ 4882{
4734 int ret;
4735
4736 rbd_dev_unprobe(rbd_dev); 4883 rbd_dev_unprobe(rbd_dev);
4737 ret = rbd_dev_header_watch_sync(rbd_dev, 0);
4738 if (ret)
4739 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4740 kfree(rbd_dev->header_name); 4884 kfree(rbd_dev->header_name);
4741 rbd_dev->header_name = NULL; 4885 rbd_dev->header_name = NULL;
4742 rbd_dev->image_format = 0; 4886 rbd_dev->image_format = 0;
@@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4748 4892
4749/* 4893/*
4750 * Probe for the existence of the header object for the given rbd 4894 * Probe for the existence of the header object for the given rbd
4751 * device. For format 2 images this includes determining the image 4895 * device. If this image is the one being mapped (i.e., not a
4752 * id. 4896 * parent), initiate a watch on its header object before using that
4897 * object to get detailed information about the rbd image.
4753 */ 4898 */
4754static int rbd_dev_image_probe(struct rbd_device *rbd_dev) 4899static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4755{ 4900{
4756 int ret; 4901 int ret;
4757 int tmp; 4902 int tmp;
@@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4771 if (ret) 4916 if (ret)
4772 goto err_out_format; 4917 goto err_out_format;
4773 4918
4774 ret = rbd_dev_header_watch_sync(rbd_dev, 1); 4919 if (mapping) {
4775 if (ret) 4920 ret = rbd_dev_header_watch_sync(rbd_dev, true);
4776 goto out_header_name; 4921 if (ret)
4922 goto out_header_name;
4923 }
4777 4924
4778 if (rbd_dev->image_format == 1) 4925 if (rbd_dev->image_format == 1)
4779 ret = rbd_dev_v1_probe(rbd_dev); 4926 ret = rbd_dev_v1_header_info(rbd_dev);
4780 else 4927 else
4781 ret = rbd_dev_v2_probe(rbd_dev); 4928 ret = rbd_dev_v2_header_info(rbd_dev);
4782 if (ret) 4929 if (ret)
4783 goto err_out_watch; 4930 goto err_out_watch;
4784 4931
@@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4787 goto err_out_probe; 4934 goto err_out_probe;
4788 4935
4789 ret = rbd_dev_probe_parent(rbd_dev); 4936 ret = rbd_dev_probe_parent(rbd_dev);
4790 if (!ret) 4937 if (ret)
4791 return 0; 4938 goto err_out_probe;
4939
4940 dout("discovered format %u image, header name is %s\n",
4941 rbd_dev->image_format, rbd_dev->header_name);
4792 4942
4943 return 0;
4793err_out_probe: 4944err_out_probe:
4794 rbd_dev_unprobe(rbd_dev); 4945 rbd_dev_unprobe(rbd_dev);
4795err_out_watch: 4946err_out_watch:
4796 tmp = rbd_dev_header_watch_sync(rbd_dev, 0); 4947 if (mapping) {
4797 if (tmp) 4948 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4798 rbd_warn(rbd_dev, "unable to tear down watch request\n"); 4949 if (tmp)
4950 rbd_warn(rbd_dev, "unable to tear down "
4951 "watch request (%d)\n", tmp);
4952 }
4799out_header_name: 4953out_header_name:
4800 kfree(rbd_dev->header_name); 4954 kfree(rbd_dev->header_name);
4801 rbd_dev->header_name = NULL; 4955 rbd_dev->header_name = NULL;
@@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus,
4819 struct rbd_spec *spec = NULL; 4973 struct rbd_spec *spec = NULL;
4820 struct rbd_client *rbdc; 4974 struct rbd_client *rbdc;
4821 struct ceph_osd_client *osdc; 4975 struct ceph_osd_client *osdc;
4976 bool read_only;
4822 int rc = -ENOMEM; 4977 int rc = -ENOMEM;
4823 4978
4824 if (!try_module_get(THIS_MODULE)) 4979 if (!try_module_get(THIS_MODULE))
@@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus,
4828 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); 4983 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4829 if (rc < 0) 4984 if (rc < 0)
4830 goto err_out_module; 4985 goto err_out_module;
4986 read_only = rbd_opts->read_only;
4987 kfree(rbd_opts);
4988 rbd_opts = NULL; /* done with this */
4831 4989
4832 rbdc = rbd_get_client(ceph_opts); 4990 rbdc = rbd_get_client(ceph_opts);
4833 if (IS_ERR(rbdc)) { 4991 if (IS_ERR(rbdc)) {
@@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus,
4858 rbdc = NULL; /* rbd_dev now owns this */ 5016 rbdc = NULL; /* rbd_dev now owns this */
4859 spec = NULL; /* rbd_dev now owns this */ 5017 spec = NULL; /* rbd_dev now owns this */
4860 5018
4861 rbd_dev->mapping.read_only = rbd_opts->read_only; 5019 rc = rbd_dev_image_probe(rbd_dev, true);
4862 kfree(rbd_opts);
4863 rbd_opts = NULL; /* done with this */
4864
4865 rc = rbd_dev_image_probe(rbd_dev);
4866 if (rc < 0) 5020 if (rc < 0)
4867 goto err_out_rbd_dev; 5021 goto err_out_rbd_dev;
4868 5022
5023 /* If we are mapping a snapshot it must be marked read-only */
5024
5025 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5026 read_only = true;
5027 rbd_dev->mapping.read_only = read_only;
5028
4869 rc = rbd_dev_device_setup(rbd_dev); 5029 rc = rbd_dev_device_setup(rbd_dev);
4870 if (!rc) 5030 if (!rc)
4871 return count; 5031 return count;
@@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev)
4911 5071
4912 rbd_free_disk(rbd_dev); 5072 rbd_free_disk(rbd_dev);
4913 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5073 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4914 rbd_dev_clear_mapping(rbd_dev); 5074 rbd_dev_mapping_clear(rbd_dev);
4915 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5075 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4916 rbd_dev->major = 0; 5076 rbd_dev->major = 0;
4917 rbd_dev_id_put(rbd_dev); 5077 rbd_dev_id_put(rbd_dev);
@@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
4978 spin_unlock_irq(&rbd_dev->lock); 5138 spin_unlock_irq(&rbd_dev->lock);
4979 if (ret < 0) 5139 if (ret < 0)
4980 goto done; 5140 goto done;
4981 ret = count;
4982 rbd_bus_del_dev(rbd_dev); 5141 rbd_bus_del_dev(rbd_dev);
5142 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5143 if (ret)
5144 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4983 rbd_dev_image_release(rbd_dev); 5145 rbd_dev_image_release(rbd_dev);
4984 module_put(THIS_MODULE); 5146 module_put(THIS_MODULE);
5147 ret = count;
4985done: 5148done:
4986 mutex_unlock(&ctl_mutex); 5149 mutex_unlock(&ctl_mutex);
4987 5150
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index cdd4c09fda96..a22a7a502740 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -95,9 +95,9 @@ struct si_sm_data {
95 enum bt_states state; 95 enum bt_states state;
96 unsigned char seq; /* BT sequence number */ 96 unsigned char seq; /* BT sequence number */
97 struct si_sm_io *io; 97 struct si_sm_io *io;
98 unsigned char write_data[IPMI_MAX_MSG_LENGTH]; 98 unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
99 int write_count; 99 int write_count;
100 unsigned char read_data[IPMI_MAX_MSG_LENGTH]; 100 unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
101 int read_count; 101 int read_count;
102 int truncated; 102 int truncated;
103 long timeout; /* microseconds countdown */ 103 long timeout; /* microseconds countdown */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 9eb360ff8cab..d5a5f020810a 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
837 return ipmi_ioctl(filep, cmd, arg); 837 return ipmi_ioctl(filep, cmd, arg);
838 } 838 }
839} 839}
840
841static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
842 unsigned long arg)
843{
844 int ret;
845
846 mutex_lock(&ipmi_mutex);
847 ret = compat_ipmi_ioctl(filep, cmd, arg);
848 mutex_unlock(&ipmi_mutex);
849
850 return ret;
851}
840#endif 852#endif
841 853
842static const struct file_operations ipmi_fops = { 854static const struct file_operations ipmi_fops = {
843 .owner = THIS_MODULE, 855 .owner = THIS_MODULE,
844 .unlocked_ioctl = ipmi_unlocked_ioctl, 856 .unlocked_ioctl = ipmi_unlocked_ioctl,
845#ifdef CONFIG_COMPAT 857#ifdef CONFIG_COMPAT
846 .compat_ioctl = compat_ipmi_ioctl, 858 .compat_ioctl = unlocked_compat_ipmi_ioctl,
847#endif 859#endif
848 .open = ipmi_open, 860 .open = ipmi_open,
849 .release = ipmi_release, 861 .release = ipmi_release,
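The compat path previously called into a handler that assumes ipmi_mutex is held without taking it; the fix is a thin wrapper mirroring what the native unlocked_ioctl already does. The wrapper shape in portable C with pthreads (the handler body is a placeholder):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ipmi_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for compat_ipmi_ioctl(): assumes the lock is held. */
static long compat_ioctl_locked(unsigned int cmd, unsigned long arg)
{
        printf("cmd=%u arg=%lu\n", cmd, arg);
        return 0;
}

/* The fix: take the same mutex the native ioctl path takes. */
static long unlocked_compat_ioctl(unsigned int cmd, unsigned long arg)
{
        long ret;

        pthread_mutex_lock(&ipmi_mutex);
        ret = compat_ioctl_locked(cmd, arg);
        pthread_mutex_unlock(&ipmi_mutex);
        return ret;
}

int main(void)
{
        return (int)unlocked_compat_ioctl(1, 2);
}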
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4d439d2fcfd6..4445fa164a2d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2037 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 2037 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2038 if (!entry) 2038 if (!entry)
2039 return -ENOMEM; 2039 return -ENOMEM;
2040 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); 2040 entry->name = kstrdup(name, GFP_KERNEL);
2041 if (!entry->name) { 2041 if (!entry->name) {
2042 kfree(entry); 2042 kfree(entry);
2043 return -ENOMEM; 2043 return -ENOMEM;
2044 } 2044 }
2045 strcpy(entry->name, name);
2046 2045
2047 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); 2046 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2048 if (!file) { 2047 if (!file) {
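kstrdup() replaces the three-step kmalloc/NULL-check/strcpy with one call. What the helper amounts to, written as plain userspace C (minus the kernel's gfp flags):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Roughly what kstrdup(name, GFP_KERNEL) does, in userspace. */
static char *my_strdup(const char *s)
{
        size_t len = strlen(s) + 1;
        char *p = malloc(len);

        if (p)
                memcpy(p, s, len);      /* length known: memcpy over strcpy */
        return p;
}

int main(void)
{
        char *name = my_strdup("stats");

        if (!name)
                return 1;       /* the caller's -ENOMEM path */
        puts(name);
        free(name);
        return 0;
}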
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 313538abe63c..af4b23ffc5a6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
663 /* We got the flags from the SMI, now handle them. */ 663 /* We got the flags from the SMI, now handle them. */
664 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 664 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
665 if (msg[2] != 0) { 665 if (msg[2] != 0) {
666 dev_warn(smi_info->dev, "Could not enable interrupts" 666 dev_warn(smi_info->dev,
667 ", failed get, using polled mode.\n"); 667 "Couldn't get irq info: %x.\n", msg[2]);
668 dev_warn(smi_info->dev,
669 "Maybe ok, but ipmi might run very slowly.\n");
668 smi_info->si_state = SI_NORMAL; 670 smi_info->si_state = SI_NORMAL;
669 } else { 671 } else {
670 msg[0] = (IPMI_NETFN_APP_REQUEST << 2); 672 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
685 687
686 /* We got the flags from the SMI, now handle them. */ 688 /* We got the flags from the SMI, now handle them. */
687 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); 689 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
688 if (msg[2] != 0) 690 if (msg[2] != 0) {
689 dev_warn(smi_info->dev, "Could not enable interrupts" 691 dev_warn(smi_info->dev,
690 ", failed set, using polled mode.\n"); 692 "Couldn't set irq info: %x.\n", msg[2]);
691 else 693 dev_warn(smi_info->dev,
694 "Maybe ok, but ipmi might run very slowly.\n");
695 } else
692 smi_info->interrupt_disabled = 0; 696 smi_info->interrupt_disabled = 0;
693 smi_info->si_state = SI_NORMAL; 697 smi_info->si_state = SI_NORMAL;
694 break; 698 break;
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6ca..534fcb825153 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
47 47
48choice 48choice
49 prompt "Default CPUFreq governor" 49 prompt "Default CPUFreq governor"
50 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 50 default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
51 default CPU_FREQ_DEFAULT_GOV_PERFORMANCE 51 default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
52 help 52 help
53 This option sets which CPUFreq governor shall be loaded at 53 This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc5..6e57543fe0b9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,16 +3,17 @@
3# 3#
4 4
5config ARM_BIG_LITTLE_CPUFREQ 5config ARM_BIG_LITTLE_CPUFREQ
6 tristate 6 tristate "Generic ARM big LITTLE CPUfreq driver"
7 depends on ARM_CPU_TOPOLOGY 7 depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
8 help
9 This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
8 10
9config ARM_DT_BL_CPUFREQ 11config ARM_DT_BL_CPUFREQ
10 tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" 12 tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
11 select ARM_BIG_LITTLE_CPUFREQ 13 depends on ARM_BIG_LITTLE_CPUFREQ && OF
12 depends on OF && HAVE_CLK
13 help 14 help
14 This enables the Generic CPUfreq driver for ARM big.LITTLE platform. 15 This enables probing via DT for Generic CPUfreq driver for ARM
15 This gets frequency tables from DT. 16 big.LITTLE platform. This gets frequency tables from DT.
16 17
17config ARM_EXYNOS_CPUFREQ 18config ARM_EXYNOS_CPUFREQ
18 bool "SAMSUNG EXYNOS SoCs" 19 bool "SAMSUNG EXYNOS SoCs"
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f36..5d7f53fcd6f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; 40static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; 41static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
42 42
43static int cpu_to_cluster(int cpu)
44{
45 return topology_physical_package_id(cpu);
46}
47
48static unsigned int bL_cpufreq_get(unsigned int cpu) 43static unsigned int bL_cpufreq_get(unsigned int cpu)
49{ 44{
50 u32 cur_cluster = cpu_to_cluster(cpu); 45 u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
192 187
193 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); 188 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
194 189
195 dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); 190 dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
196 return 0; 191 return 0;
197} 192}
198 193
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..79b2ce17884d 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
34 int (*init_opp_table)(struct device *cpu_dev); 34 int (*init_opp_table)(struct device *cpu_dev);
35}; 35};
36 36
37static inline int cpu_to_cluster(int cpu)
38{
39 return topology_physical_package_id(cpu);
40}
41
37int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); 42int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
38void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); 43void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
39 44
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375c..173ed059d95f 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
66 66
67 parent = of_find_node_by_path("/cpus"); 67 parent = of_find_node_by_path("/cpus");
68 if (!parent) { 68 if (!parent) {
69 pr_err("failed to find OF /cpus\n"); 69 pr_info("Failed to find OF /cpus. Use CPUFREQ_ETERNAL transition latency\n");
70 return -ENOENT; 70 return CPUFREQ_ETERNAL;
71 } 71 }
72 72
73 for_each_child_of_node(parent, np) { 73 for_each_child_of_node(parent, np) {
@@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
78 of_node_put(np); 78 of_node_put(np);
79 of_node_put(parent); 79 of_node_put(parent);
80 80
81 return 0; 81 return transition_latency;
82 } 82 }
83 83
84 return -ENODEV; 84 pr_info("clock-latency isn't found, use CPUFREQ_ETERNAL transition latency\n");
85 return CPUFREQ_ETERNAL;
85} 86}
86 87
87static struct cpufreq_arm_bL_ops dt_bL_ops = { 88static struct cpufreq_arm_bL_ops dt_bL_ops = {
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab04..a64eb8b70444 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
189 189
190 if (!np) { 190 if (!np) {
191 pr_err("failed to find cpu0 node\n"); 191 pr_err("failed to find cpu0 node\n");
192 return -ENOENT; 192 ret = -ENOENT;
193 goto out_put_parent;
193 } 194 }
194 195
195 cpu_dev = &pdev->dev; 196 cpu_dev = &pdev->dev;
196 cpu_dev->of_node = np; 197 cpu_dev->of_node = np;
197 198
199 cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
200 if (IS_ERR(cpu_reg)) {
201 /*
202 * If cpu0 regulator supply node is present, but regulator is
 203 * not yet registered, we should try deferring probe.
204 */
205 if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
206 dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
207 ret = -EPROBE_DEFER;
208 goto out_put_node;
209 }
210 pr_warn("failed to get cpu0 regulator: %ld\n",
211 PTR_ERR(cpu_reg));
212 cpu_reg = NULL;
213 }
214
198 cpu_clk = devm_clk_get(cpu_dev, NULL); 215 cpu_clk = devm_clk_get(cpu_dev, NULL);
199 if (IS_ERR(cpu_clk)) { 216 if (IS_ERR(cpu_clk)) {
200 ret = PTR_ERR(cpu_clk); 217 ret = PTR_ERR(cpu_clk);
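Moving the regulator lookup ahead of the clock lookup lets a supply that exists in the DT but is not yet registered defer the whole probe, instead of being misread as permanently absent. A sketch of the three-way outcome, with an invented lookup function and the kernel's EPROBE_DEFER value copied in:

#include <stdio.h>

#define EPROBE_DEFER 517        /* kernel's "try again later" errno */

struct regulator;

/* Invented lookup: returns 0 on success or a negative error code. */
static int regulator_lookup(struct regulator **out, int simulate)
{
        *out = NULL;
        return simulate;
}

static int probe(int simulate)
{
        struct regulator *reg;
        int err = regulator_lookup(&reg, simulate);

        if (err == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* supply not ready yet: retry */
        if (err < 0) {
                fprintf(stderr, "no regulator (%d), continuing without\n", err);
                reg = NULL;             /* treat as optional */
        }
        printf("probe continues (regulator %s)\n", reg ? "present" : "absent");
        return 0;
}

int main(void)
{
        printf("plain: %d\n", probe(0));
        printf("defer: %d\n", probe(-EPROBE_DEFER));
        return 0;
}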
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
202 goto out_put_node; 219 goto out_put_node;
203 } 220 }
204 221
205 cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
206 if (IS_ERR(cpu_reg)) {
207 pr_warn("failed to get cpu0 regulator\n");
208 cpu_reg = NULL;
209 }
210
211 ret = of_init_opp_table(cpu_dev); 222 ret = of_init_opp_table(cpu_dev);
212 if (ret) { 223 if (ret) {
213 pr_err("failed to init OPP table: %d\n", ret); 224 pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +275,8 @@ out_free_table:
264 opp_free_cpufreq_table(cpu_dev, &freq_table); 275 opp_free_cpufreq_table(cpu_dev, &freq_table);
265out_put_node: 276out_put_node:
266 of_node_put(np); 277 of_node_put(np);
278out_put_parent:
279 of_node_put(parent);
267 return ret; 280 return ret;
268} 281}
269 282
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90f..4b8c7f297d74 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1075 __func__, cpu_dev->id, cpu); 1075 __func__, cpu_dev->id, cpu);
1076 } 1076 }
1077 1077
1078 if ((cpus == 1) && (cpufreq_driver->target))
1079 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1080
1078 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); 1081 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1079 cpufreq_cpu_put(data); 1082 cpufreq_cpu_put(data);
1080 1083
1081 /* If cpu is last user of policy, free policy */ 1084 /* If cpu is last user of policy, free policy */
1082 if (cpus == 1) { 1085 if (cpus == 1) {
1083 if (cpufreq_driver->target)
1084 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1085
1086 lock_policy_rwsem_read(cpu); 1086 lock_policy_rwsem_read(cpu);
1087 kobj = &data->kobj; 1087 kobj = &data->kobj;
1088 cmp = &data->kobj_unregister; 1088 cmp = &data->kobj_unregister;
@@ -1832,15 +1832,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1832 if (dev) { 1832 if (dev) {
1833 switch (action) { 1833 switch (action) {
1834 case CPU_ONLINE: 1834 case CPU_ONLINE:
1835 case CPU_ONLINE_FROZEN:
1836 cpufreq_add_dev(dev, NULL); 1835 cpufreq_add_dev(dev, NULL);
1837 break; 1836 break;
1838 case CPU_DOWN_PREPARE: 1837 case CPU_DOWN_PREPARE:
1839 case CPU_DOWN_PREPARE_FROZEN: 1838 case CPU_UP_CANCELED_FROZEN:
1840 __cpufreq_remove_dev(dev, NULL); 1839 __cpufreq_remove_dev(dev, NULL);
1841 break; 1840 break;
1842 case CPU_DOWN_FAILED: 1841 case CPU_DOWN_FAILED:
1843 case CPU_DOWN_FAILED_FROZEN:
1844 cpufreq_add_dev(dev, NULL); 1842 cpufreq_add_dev(dev, NULL);
1845 break; 1843 break;
1846 } 1844 }
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113b..5af40ad82d23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
255 if (have_governor_per_policy()) { 255 if (have_governor_per_policy()) {
256 WARN_ON(dbs_data); 256 WARN_ON(dbs_data);
257 } else if (dbs_data) { 257 } else if (dbs_data) {
258 dbs_data->usage_count++;
258 policy->governor_data = dbs_data; 259 policy->governor_data = dbs_data;
259 return 0; 260 return 0;
260 } 261 }
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
266 } 267 }
267 268
268 dbs_data->cdata = cdata; 269 dbs_data->cdata = cdata;
270 dbs_data->usage_count = 1;
269 rc = cdata->init(dbs_data); 271 rc = cdata->init(dbs_data);
270 if (rc) { 272 if (rc) {
271 pr_err("%s: POLICY_INIT: init() failed\n", __func__); 273 pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
294 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, 296 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
295 latency * LATENCY_MULTIPLIER)); 297 latency * LATENCY_MULTIPLIER));
296 298
297 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 299 if ((cdata->governor == GOV_CONSERVATIVE) &&
300 (!policy->governor->initialized)) {
298 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; 301 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
299 302
300 cpufreq_register_notifier(cs_ops->notifier_block, 303 cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
306 309
307 return 0; 310 return 0;
308 case CPUFREQ_GOV_POLICY_EXIT: 311 case CPUFREQ_GOV_POLICY_EXIT:
309 if ((policy->governor->initialized == 1) || 312 if (!--dbs_data->usage_count) {
310 have_governor_per_policy()) {
311 sysfs_remove_group(get_governor_parent_kobj(policy), 313 sysfs_remove_group(get_governor_parent_kobj(policy),
312 get_sysfs_attr(dbs_data)); 314 get_sysfs_attr(dbs_data));
313 315
314 if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { 316 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
317 (policy->governor->initialized == 1)) {
315 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; 318 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
316 319
317 cpufreq_unregister_notifier(cs_ops->notifier_block, 320 cpufreq_unregister_notifier(cs_ops->notifier_block,
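With tuners shared across policies, a plain usage counter now decides when POLICY_EXIT may really tear down the sysfs group: the first INIT sets it to 1, each re-attach increments it, and only the last exit frees anything. The counting in isolation:

#include <stdio.h>

struct dbs_data { int usage_count; };

static struct dbs_data shared;  /* one tuner set shared by all policies */

static void gov_policy_init(void)
{
        if (shared.usage_count++ == 0)
                printf("allocated tuners, created sysfs group\n");
}

static void gov_policy_exit(void)
{
        if (--shared.usage_count == 0)
                printf("removed sysfs group, freed tuners\n");
}

int main(void)
{
        gov_policy_init();      /* first policy: real setup */
        gov_policy_init();      /* second policy: just counts */
        gov_policy_exit();      /* one user still left: no-op */
        gov_policy_exit();      /* last user: real teardown */
        return 0;
}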
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0bd..e16a96130cb3 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -211,6 +211,7 @@ struct common_dbs_data {
211struct dbs_data { 211struct dbs_data {
212 struct common_dbs_data *cdata; 212 struct common_dbs_data *cdata;
213 unsigned int min_sampling_rate; 213 unsigned int min_sampling_rate;
214 int usage_count;
214 void *tuners; 215 void *tuners;
215 216
216 /* dbs_mutex protects dbs_enable in governor start/stop */ 217 /* dbs_mutex protects dbs_enable in governor start/stop */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf77..4b9bb5def6f1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
547 tuners->io_is_busy = should_io_be_busy(); 547 tuners->io_is_busy = should_io_be_busy();
548 548
549 dbs_data->tuners = tuners; 549 dbs_data->tuners = tuners;
550 pr_info("%s: tuners %p\n", __func__, tuners);
551 mutex_init(&dbs_data->mutex); 550 mutex_init(&dbs_data->mutex);
552 return 0; 551 return 0;
553} 552}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd873..fb65decffa28 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
349 349
350 switch (action) { 350 switch (action) {
351 case CPU_ONLINE: 351 case CPU_ONLINE:
352 case CPU_ONLINE_FROZEN:
353 cpufreq_update_policy(cpu); 352 cpufreq_update_policy(cpu);
354 break; 353 break;
355 case CPU_DOWN_PREPARE: 354 case CPU_DOWN_PREPARE:
356 case CPU_DOWN_PREPARE_FROZEN:
357 cpufreq_stats_free_sysfs(cpu); 355 cpufreq_stats_free_sysfs(cpu);
358 break; 356 break;
359 case CPU_DEAD: 357 case CPU_DEAD:
360 case CPU_DEAD_FROZEN: 358 cpufreq_stats_free_table(cpu);
359 break;
360 case CPU_UP_CANCELED_FROZEN:
361 cpufreq_stats_free_sysfs(cpu);
361 cpufreq_stats_free_table(cpu); 362 cpufreq_stats_free_table(cpu);
362 break; 363 break;
363 } 364 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..9c36ace92a39 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
48} 48}
49 49
50struct sample { 50struct sample {
51 ktime_t start_time;
52 ktime_t end_time;
53 int core_pct_busy; 51 int core_pct_busy;
54 int pstate_pct_busy;
55 u64 duration_us;
56 u64 idletime_us;
57 u64 aperf; 52 u64 aperf;
58 u64 mperf; 53 u64 mperf;
59 int freq; 54 int freq;
@@ -86,13 +81,9 @@ struct cpudata {
86 struct pstate_adjust_policy *pstate_policy; 81 struct pstate_adjust_policy *pstate_policy;
87 struct pstate_data pstate; 82 struct pstate_data pstate;
88 struct _pid pid; 83 struct _pid pid;
89 struct _pid idle_pid;
90 84
91 int min_pstate_count; 85 int min_pstate_count;
92 int idle_mode;
93 86
94 ktime_t prev_sample;
95 u64 prev_idle_time_us;
96 u64 prev_aperf; 87 u64 prev_aperf;
97 u64 prev_mperf; 88 u64 prev_mperf;
98 int sample_ptr; 89 int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
124 int min_perf_pct; 115 int min_perf_pct;
125 int32_t max_perf; 116 int32_t max_perf;
126 int32_t min_perf; 117 int32_t min_perf;
118 int max_policy_pct;
119 int max_sysfs_pct;
127}; 120};
128 121
129static struct perf_limits limits = { 122static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
132 .max_perf = int_tofp(1), 125 .max_perf = int_tofp(1),
133 .min_perf_pct = 0, 126 .min_perf_pct = 0,
134 .min_perf = 0, 127 .min_perf = 0,
128 .max_policy_pct = 100,
129 .max_sysfs_pct = 100,
135}; 130};
136 131
137static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 132static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
202 0); 197 0);
203} 198}
204 199
205static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
206{
207 pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
208 pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
209 pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
210
211 pid_reset(&cpu->idle_pid,
212 75,
213 50,
214 cpu->pstate_policy->deadband,
215 0);
216}
217
218static inline void intel_pstate_reset_all_pid(void) 200static inline void intel_pstate_reset_all_pid(void)
219{ 201{
220 unsigned int cpu; 202 unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
302 if (ret != 1) 284 if (ret != 1)
303 return -EINVAL; 285 return -EINVAL;
304 286
305 limits.max_perf_pct = clamp_t(int, input, 0 , 100); 287 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
288 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
306 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 289 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
307 return count; 290 return count;
308} 291}
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
408 if (pstate == cpu->pstate.current_pstate) 391 if (pstate == cpu->pstate.current_pstate)
409 return; 392 return;
410 393
411#ifndef MODULE
412 trace_cpu_frequency(pstate * 100000, cpu->cpu); 394 trace_cpu_frequency(pstate * 100000, cpu->cpu);
413#endif 395
414 cpu->pstate.current_pstate = pstate; 396 cpu->pstate.current_pstate = pstate;
415 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); 397 wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
416 398
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
450 struct sample *sample) 432 struct sample *sample)
451{ 433{
452 u64 core_pct; 434 u64 core_pct;
453 sample->pstate_pct_busy = 100 - div64_u64(
454 sample->idletime_us * 100,
455 sample->duration_us);
456 core_pct = div64_u64(sample->aperf * 100, sample->mperf); 435 core_pct = div64_u64(sample->aperf * 100, sample->mperf);
457 sample->freq = cpu->pstate.max_pstate * core_pct * 1000; 436 sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
458 437
459 sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), 438 sample->core_pct_busy = core_pct;
460 100);
461} 439}
462 440
463static inline void intel_pstate_sample(struct cpudata *cpu) 441static inline void intel_pstate_sample(struct cpudata *cpu)
464{ 442{
465 ktime_t now;
466 u64 idle_time_us;
467 u64 aperf, mperf; 443 u64 aperf, mperf;
468 444
469 now = ktime_get();
470 idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
471
472 rdmsrl(MSR_IA32_APERF, aperf); 445 rdmsrl(MSR_IA32_APERF, aperf);
473 rdmsrl(MSR_IA32_MPERF, mperf); 446 rdmsrl(MSR_IA32_MPERF, mperf);
474 /* for the first sample, don't actually record a sample, just 447 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
475 * set the baseline */ 448 cpu->samples[cpu->sample_ptr].aperf = aperf;
476 if (cpu->prev_idle_time_us > 0) { 449 cpu->samples[cpu->sample_ptr].mperf = mperf;
477 cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; 450 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
478 cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; 451 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
479 cpu->samples[cpu->sample_ptr].end_time = now; 452
480 cpu->samples[cpu->sample_ptr].duration_us = 453 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
481 ktime_us_delta(now, cpu->prev_sample);
482 cpu->samples[cpu->sample_ptr].idletime_us =
483 idle_time_us - cpu->prev_idle_time_us;
484
485 cpu->samples[cpu->sample_ptr].aperf = aperf;
486 cpu->samples[cpu->sample_ptr].mperf = mperf;
487 cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
488 cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
489
490 intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
491 }
492 454
493 cpu->prev_sample = now;
494 cpu->prev_idle_time_us = idle_time_us;
495 cpu->prev_aperf = aperf; 455 cpu->prev_aperf = aperf;
496 cpu->prev_mperf = mperf; 456 cpu->prev_mperf = mperf;
497} 457}
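With the wall-clock and idle-time bookkeeping removed, each sample reduces to APERF/MPERF deltas stored in a small ring, and core busy percent is just 100 * delta-aperf / delta-mperf. A sketch with synthetic counter values in place of rdmsrl() (the real driver seeds prev_* with a baseline read at init):

#include <stdint.h>
#include <stdio.h>

#define SAMPLE_COUNT 3

struct sample {
        uint64_t aperf;
        uint64_t mperf;
        unsigned int core_pct_busy;
};

static struct sample samples[SAMPLE_COUNT];
static int sample_ptr;
static uint64_t prev_aperf, prev_mperf;

/* One timer tick: keep only the deltas since the previous reading. */
static void take_sample(uint64_t aperf, uint64_t mperf)
{
        struct sample *s;

        sample_ptr = (sample_ptr + 1) % SAMPLE_COUNT;
        s = &samples[sample_ptr];
        s->aperf = aperf - prev_aperf;
        s->mperf = mperf - prev_mperf;
        s->core_pct_busy = (unsigned int)(s->aperf * 100 / s->mperf);

        prev_aperf = aperf;
        prev_mperf = mperf;
}

int main(void)
{
        /* Synthetic MSR values: ~50% of base clock, then ~100%. */
        take_sample(500, 1000);
        printf("busy=%u%%\n", samples[sample_ptr].core_pct_busy);
        take_sample(1500, 2000);
        printf("busy=%u%%\n", samples[sample_ptr].core_pct_busy);
        return 0;
}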
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
505 mod_timer_pinned(&cpu->timer, jiffies + delay); 465 mod_timer_pinned(&cpu->timer, jiffies + delay);
506} 466}
507 467
508static inline void intel_pstate_idle_mode(struct cpudata *cpu)
509{
510 cpu->idle_mode = 1;
511}
512
513static inline void intel_pstate_normal_mode(struct cpudata *cpu)
514{
515 cpu->idle_mode = 0;
516}
517
518static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) 468static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
519{ 469{
520 int32_t busy_scaled; 470 int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
547 intel_pstate_pstate_decrease(cpu, steps); 497 intel_pstate_pstate_decrease(cpu, steps);
548} 498}
549 499
550static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
551{
552 int busy_scaled;
553 struct _pid *pid;
554 int ctl = 0;
555 int steps;
556
557 pid = &cpu->idle_pid;
558
559 busy_scaled = intel_pstate_get_scaled_busy(cpu);
560
561 ctl = pid_calc(pid, 100 - busy_scaled);
562
563 steps = abs(ctl);
564 if (ctl < 0)
565 intel_pstate_pstate_decrease(cpu, steps);
566 else
567 intel_pstate_pstate_increase(cpu, steps);
568
569 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
570 intel_pstate_normal_mode(cpu);
571}
572
573static void intel_pstate_timer_func(unsigned long __data) 500static void intel_pstate_timer_func(unsigned long __data)
574{ 501{
575 struct cpudata *cpu = (struct cpudata *) __data; 502 struct cpudata *cpu = (struct cpudata *) __data;
576 503
577 intel_pstate_sample(cpu); 504 intel_pstate_sample(cpu);
505 intel_pstate_adjust_busy_pstate(cpu);
578 506
579 if (!cpu->idle_mode)
580 intel_pstate_adjust_busy_pstate(cpu);
581 else
582 intel_pstate_adjust_idle_pstate(cpu);
583
584#if defined(XPERF_FIX)
585 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { 507 if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
586 cpu->min_pstate_count++; 508 cpu->min_pstate_count++;
587 if (!(cpu->min_pstate_count % 5)) { 509 if (!(cpu->min_pstate_count % 5)) {
588 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 510 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
589 intel_pstate_idle_mode(cpu);
590 } 511 }
591 } else 512 } else
592 cpu->min_pstate_count = 0; 513 cpu->min_pstate_count = 0;
593#endif 514
594 intel_pstate_set_sample_time(cpu); 515 intel_pstate_set_sample_time(cpu);
595} 516}
596 517
@@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
631 (unsigned long)cpu; 552 (unsigned long)cpu;
632 cpu->timer.expires = jiffies + HZ/100; 553 cpu->timer.expires = jiffies + HZ/100;
633 intel_pstate_busy_pid_reset(cpu); 554 intel_pstate_busy_pid_reset(cpu);
634 intel_pstate_idle_pid_reset(cpu);
635 intel_pstate_sample(cpu); 555 intel_pstate_sample(cpu);
636 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); 556 intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
637 557
@@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
675 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 595 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
676 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 596 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
677 597
678 limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; 598 limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
679 limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); 599 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
600 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
680 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 601 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
681 602
682 return 0; 603 return 0;
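Splitting the cap into max_policy_pct and max_sysfs_pct means a policy update can no longer silently undo a stricter sysfs setting; the effective limit is recomputed as the min of the two on either write. The interaction in a few lines:

#include <stdio.h>

static int max_policy_pct = 100, max_sysfs_pct = 100, max_perf_pct = 100;

static int clamp_pct(int v) { return v < 0 ? 0 : v > 100 ? 100 : v; }
static int min_int(int a, int b) { return a < b ? a : b; }

static void set_policy_max(int pct)     /* cpufreq policy path */
{
        max_policy_pct = clamp_pct(pct);
        max_perf_pct = min_int(max_policy_pct, max_sysfs_pct);
}

static void store_sysfs_max(int pct)    /* sysfs store path */
{
        max_sysfs_pct = clamp_pct(pct);
        max_perf_pct = min_int(max_policy_pct, max_sysfs_pct);
}

int main(void)
{
        store_sysfs_max(80);    /* user caps performance at 80% */
        set_policy_max(100);    /* policy refresh no longer resets it */
        printf("effective max = %d%%\n", max_perf_pct);         /* 80 */
        return 0;
}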
@@ -788,10 +709,9 @@ static int __init intel_pstate_init(void)
788 709
789 pr_info("Intel P-state driver initializing.\n"); 710 pr_info("Intel P-state driver initializing.\n");
790 711
791 all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); 712 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
792 if (!all_cpu_data) 713 if (!all_cpu_data)
793 return -ENOMEM; 714 return -ENOMEM;
794 memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
795 715
796 rc = cpufreq_register_driver(&intel_pstate_driver); 716 rc = cpufreq_register_driver(&intel_pstate_driver);
797 if (rc) 717 if (rc)
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96eb..b2644af985ec 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
171 priv.dev = &pdev->dev; 171 priv.dev = &pdev->dev;
172 172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 if (!res) {
175 dev_err(&pdev->dev, "Cannot get memory resource\n");
176 return -ENODEV;
177 }
178 priv.base = devm_ioremap_resource(&pdev->dev, res); 174 priv.base = devm_ioremap_resource(&pdev->dev, res);
179 if (IS_ERR(priv.base)) 175 if (IS_ERR(priv.base))
180 return PTR_ERR(priv.base); 176 return PTR_ERR(priv.base);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3a8f7e6db295..e7e92429d10f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
 
+	/* Locking is currently fubar in the panic handler. */
+	if (oops_in_progress)
+		return;
+
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 		WARN_ON(!mutex_is_locked(&crtc->mutex));
 
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
 	else
 		return "unknown";
 }
+EXPORT_SYMBOL(drm_get_connector_status_name);
 
 /**
  * drm_mode_object_get - allocate a new modeset identifier
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index e974f9309b72..ed1334e27c33 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 			connector->helper_private;
 	int count = 0;
 	int mode_flags = 0;
+	bool verbose_prune = true;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
 			drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
 			connector->base.id, drm_get_connector_name(connector));
 		drm_mode_connector_update_edid_property(connector, NULL);
+		verbose_prune = false;
 		goto prune;
 	}
 
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	}
 
 prune:
-	drm_mode_prune_invalid(dev, &connector->modes, true);
+	drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
 
 	if (list_empty(&connector->modes))
 		return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
 			continue;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
-			      connector->base.id,
-			      drm_get_connector_name(connector),
-			      old_status, connector->status);
-		if (old_status != connector->status)
+		if (old_status != connector->status) {
+			const char *old, *new;
+
+			old = drm_get_connector_status_name(old_status);
+			new = drm_get_connector_status_name(connector->status);
+
+			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
+				      "status updated from %s to %s\n",
+				      connector->base.id,
+				      drm_get_connector_name(connector),
+				      old, new);
+
 			changed = true;
+		}
 	}
 
 	mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 		old_status = connector->status;
 
 		connector->status = connector->funcs->detect(connector, false);
-		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
 			      connector->base.id,
 			      drm_get_connector_name(connector),
-			      old_status, connector->status);
+			      drm_get_connector_status_name(old_status),
+			      drm_get_connector_status_name(connector->status));
 		if (old_status != connector->status)
 			changed = true;
 	}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8d4f29075af5..9cc247f55502 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 
 #define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0}
+	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
 
 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_device *dev;
-	const struct drm_ioctl_desc *ioctl;
+	const struct drm_ioctl_desc *ioctl = NULL;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
 
-	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
-		  task_pid_nr(current), cmd, nr,
-		  (long)old_encode_dev(file_priv->minor->device),
-		  file_priv->authenticated);
-
 	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
 	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
 		goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
 	} else
 		goto err_i1;
 
+	DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->device),
+		  file_priv->authenticated, ioctl->name);
+
 	/* Do not trust userspace, use our own definition */
 	func = ioctl->func;
 	/* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
 	}
 
 	err_i1:
+	if (!ioctl)
+		DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+			  task_pid_nr(current),
+			  (long)old_encode_dev(file_priv->minor->device),
+			  file_priv->authenticated, cmd, nr);
+
 	if (kdata != stack_kdata)
 		kfree(kdata);
 	atomic_dec(&dev->ioctl_count);
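The .name = #ioctl initializer in the first hunk uses the C preprocessor's stringizing operator: #arg expands to the argument's spelling at the macro call site, so every table entry gains a printable name at zero runtime cost. A self-contained illustration (all names invented for the example):

    #include <stdio.h>

    struct desc { int cmd; const char *name; };

    /* #x turns the macro argument into a string literal */
    #define DESC(x) { (x), #x }

    enum { CMD_FOO = 1, CMD_BAR = 2 };

    static const struct desc table[] = { DESC(CMD_FOO), DESC(CMD_BAR) };

    int main(void)
    {
            printf("%s = %d\n", table[0].name, table[0].cmd); /* CMD_FOO = 1 */
            return 0;
    }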
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 48c52f7df4e6..0cfb60f54766 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 			 struct i2c_adapter *adap,
 			 const struct i2c_board_info *info)
 {
-	char modalias[sizeof(I2C_MODULE_PREFIX)
-		      + I2C_NAME_SIZE];
 	struct module *module = NULL;
 	struct i2c_client *client;
 	struct drm_i2c_encoder_driver *encoder_drv;
 	int err = 0;
 
-	snprintf(modalias, sizeof(modalias),
-		 "%s%s", I2C_MODULE_PREFIX, info->type);
-	request_module(modalias);
+	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
 	client = i2c_new_device(adap, info);
 	if (!client) {
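This simplification works because request_module() is itself printf-style, roughly int request_module(const char *fmt, ...), so formatting the modalias into a fixed-size stack buffer first was redundant:

    /* one call, no temporary buffer to size or overflow */
    request_module("%s%s", I2C_MODULE_PREFIX, info->type);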
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d7..07cf99cc8862 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 EXPORT_SYMBOL(drm_mm_debug_table);
 
 #if defined(CONFIG_DEBUG_FS)
-int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
 {
-	struct drm_mm_node *entry;
-	unsigned long total_used = 0, total_free = 0, total = 0;
 	unsigned long hole_start, hole_end, hole_size;
 
-	hole_start = drm_mm_hole_node_start(&mm->head_node);
-	hole_end = drm_mm_hole_node_end(&mm->head_node);
-	hole_size = hole_end - hole_start;
-	if (hole_size)
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-			   hole_start, hole_end, hole_size);
-	total_free += hole_size;
+	if (entry->hole_follows) {
+		hole_start = drm_mm_hole_node_start(entry);
+		hole_end = drm_mm_hole_node_end(entry);
+		hole_size = hole_end - hole_start;
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+			   hole_start, hole_end, hole_size);
+		return hole_size;
+	}
+
+	return 0;
+}
+
+int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+{
+	struct drm_mm_node *entry;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+
+	total_free += drm_mm_dump_hole(m, &mm->head_node);
 
 	drm_mm_for_each_node(entry, mm) {
 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
 			   entry->start, entry->start + entry->size,
 			   entry->size);
 		total_used += entry->size;
-		if (entry->hole_follows) {
-			hole_start = drm_mm_hole_node_start(entry);
-			hole_end = drm_mm_hole_node_end(entry);
-			hole_size = hole_end - hole_start;
-			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
-				   hole_start, hole_end, hole_size);
-			total_free += hole_size;
-		}
+		total_free += drm_mm_dump_hole(m, entry);
 	}
 	total = total_free + total_used;
 
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index faa79df02648..a371ff865a88 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 			was_digit = false;
 		} else
 			goto done;
+		break;
 	case '0' ... '9':
 		was_digit = true;
 		break;
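The added break closes a switch fallthrough: without it, control ran from the preceding case straight into the '0' ... '9' arm and set was_digit for a non-digit character. The bug class in miniature (illustrative code, not the parser itself):

    switch (c) {
    case '-':
            handle_dash();
            break;          /* omitting this falls into the digit case */
    case '0' ... '9':       /* GCC case-range extension, as in drm_modes.c */
            was_digit = true;
            break;
    }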
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6be940effefd..6165535d15f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	if (timeout) {
 		struct timespec sleep_time = timespec_sub(now, before);
 		*timeout = timespec_sub(*timeout, sleep_time);
+		if (!timespec_valid(timeout)) /* i.e. negative time remains */
+			set_normalized_timespec(timeout, 0, 0);
 	}
 
 	switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
 	case -ERESTARTSYS: /* Signal */
 		return (int)end;
 	case 0: /* Timeout */
-		if (timeout)
-			set_normalized_timespec(timeout, 0, 0);
 		return -ETIME;
 	default: /* Completed */
 		WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
-	if (timeout) {
-		WARN_ON(!timespec_valid(timeout));
+	if (timeout)
 		args->timeout_ns = timespec_to_ns(timeout);
-	}
 	return ret;
 
 out:
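These hunks move the "no time left" clamp to the one place where remaining time is computed. A worked example of why it is needed: if userspace passed a 10 ms timeout and the wait actually slept 12 ms, timespec_sub() yields {tv_sec = -1, tv_nsec = 998000000}, which timespec_valid() rejects because tv_sec is negative; the clamp reports {0, 0} instead of handing a nonsensical negative timespec back through args->timeout_ns:

    *timeout = timespec_sub(*timeout, sleep_time);
    if (!timespec_valid(timeout))   /* i.e. negative time remains */
            set_normalized_timespec(timeout, 0, 0);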
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dca614de71b6..bdb0d7717bc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
 	return snb_gmch_ctl << 25; /* 32 MB units */
 }
 
-static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
-{
-	static const int stolen_decoder[] = {
-		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
-	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
-	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
-	return stolen_decoder[snb_gmch_ctl] << 20;
-}
-
 static int gen6_gmch_probe(struct drm_device *dev,
 			   size_t *gtt_total,
 			   size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
-	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
-		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
-	else
-		*stolen = gen6_get_stolen_size(snb_gmch_ctl);
-
+	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
 	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
 
 	/* For Modern GENs the PTEs and register space are split in the BAR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 83f9c26e1adb..2d6b62e42daf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -46,8 +46,6 @@
 #define SNB_GMCH_GGMS_MASK	0x3
 #define SNB_GMCH_GMS_SHIFT	3 /* Graphics Mode Select */
 #define SNB_GMCH_GMS_MASK	0x1f
-#define IVB_GMCH_GMS_SHIFT	4
-#define IVB_GMCH_GMS_MASK	0xf
 
 
 /* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 26a0a570f92e..fb961bb81903 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
+		if (port != PORT_A)
+			intel_dp_stop_link_train(intel_dp);
 	}
 }
 
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 	} else if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+		if (port == PORT_A)
+			intel_dp_stop_link_train(intel_dp);
+
 		ironlake_edp_backlight_on(intel_dp);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb2fbc1e08b9..3d704b706a8d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
 	 * bpc in between. */
 	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
+	if (is_edp(intel_dp) && dev_priv->edp.bpp)
+		bpp = min_t(int, bpp, dev_priv->edp.bpp);
+
 	for (; bpp >= 6*3; bpp -= 2*3) {
 		mode_rate = intel_dp_link_required(target_clock, bpp);
 
@@ -739,6 +742,7 @@ found:
 	intel_dp->link_bw = bws[clock];
 	intel_dp->lane_count = lane_count;
 	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+	pipe_config->pipe_bpp = bpp;
 	pipe_config->pixel_target_clock = target_clock;
 
 	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
 			       target_clock, adjusted_mode->clock,
 			       &pipe_config->dp_m_n);
 
-	/*
-	 * XXX: We have a strange regression where using the vbt edp bpp value
-	 * for the link bw computation results in black screens, the panel only
-	 * works when we do the computation at the usual 24bpp (but still
-	 * requires us to use 18bpp). Until that's fully debugged, stay
-	 * bug-for-bug compatible with the old code.
-	 */
-	if (is_edp(intel_dp) && dev_priv->edp.bpp) {
-		DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
-			      bpp, dev_priv->edp.bpp);
-		bpp = min_t(int, bpp, dev_priv->edp.bpp);
-	}
-	pipe_config->pipe_bpp = bpp;
-
 	return true;
 }
 
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 	ironlake_edp_panel_on(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, true);
 	intel_dp_complete_link_train(intel_dp);
+	intel_dp_stop_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
 }
 
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_dig_port->port;
 	int ret;
-	uint32_t temp;
 
 	if (HAS_DDI(dev)) {
-		temp = I915_READ(DP_TP_CTL(port));
+		uint32_t temp = I915_READ(DP_TP_CTL(port));
 
 		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
 			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
 		case DP_TRAINING_PATTERN_DISABLE:
-
-			if (port != PORT_A) {
-				temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
-				I915_WRITE(DP_TP_CTL(port), temp);
-
-				if (wait_for((I915_READ(DP_TP_STATUS(port)) &
-					      DP_TP_STATUS_IDLE_DONE), 1))
-					DRM_ERROR("Timed out waiting for DP idle patterns\n");
-
-				temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
-			}
-
 			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
 			break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	return true;
 }
 
+static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
+	uint32_t val;
+
+	if (!HAS_DDI(dev))
+		return;
+
+	val = I915_READ(DP_TP_CTL(port));
+	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
+	I915_WRITE(DP_TP_CTL(port), val);
+
+	/*
+	 * On PORT_A we can have only eDP in SST mode. There the only reason
+	 * we need to set idle transmission mode is to work around a HW issue
+	 * where we enable the pipe while not in idle link-training mode.
+	 * In this case there is requirement to wait for a minimum number of
+	 * idle patterns to be sent.
+	 */
+	if (port == PORT_A)
+		return;
+
+	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
+		     1))
+		DRM_ERROR("Timed out waiting for DP idle patterns\n");
+}
+
 /* Enable corresponding port and start training pattern 1 */
 void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
+	intel_dp_set_idle_link_train(intel_dp);
+
+	intel_dp->DP = DP;
+
 	if (channel_eq)
 		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
 
-	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
+}
+
+void intel_dp_stop_link_train(struct intel_dp *intel_dp)
+{
+	intel_dp_set_link_train(intel_dp, intel_dp->DP,
+				DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 			      drm_get_encoder_name(&intel_encoder->base));
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
+		intel_dp_stop_link_train(intel_dp);
 	}
 }
 
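Taken together, the intel_ddi.c and intel_dp.c hunks split DisplayPort link training into three explicit steps that every caller now performs in the same order, with idle transmission set between channel equalization and normal output; PORT_A (eDP in SST mode) still programs idle mode but skips the idle-done wait, for the hardware reason given in the new comment. The resulting call sequence:

    intel_dp_start_link_train(intel_dp);    /* training pattern 1: clock recovery */
    intel_dp_complete_link_train(intel_dp); /* pattern 2: channel EQ, then idle mode */
    intel_dp_stop_link_train(intel_dp);     /* leave training: normal pixel output */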
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b5b6d19e6dd3..624a9e6b8d71 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0e19e575a1b4..6b7c3ca2c035 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
 void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (!dev_priv->fbdev)
+	struct intel_fbdev *ifbdev = dev_priv->fbdev;
+	struct fb_info *info;
+
+	if (!ifbdev)
 		return;
 
-	fb_set_suspend(dev_priv->fbdev->helper.fbdev, state);
+	info = ifbdev->helper.fbdev;
+
+	/* On resume from hibernation: If the object is shmemfs backed, it has
+	 * been restored from swap. If the object is stolen however, it will be
+	 * full of whatever garbage was left in there.
+	 */
+	if (!state && ifbdev->ifb.obj->stolen)
+		memset_io(info->screen_base, 0, info->screen_size);
+
+	fb_set_suspend(info, state);
 }
 
 MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index de3b0dc5658b..aa01128ff192 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
 
 	vlv_update_drain_latency(dev);
 
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &valleyview_wm_info, latency_ns,
 			    &valleyview_cursor_wm_info, latency_ns,
 			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &valleyview_wm_info, latency_ns,
 			    &valleyview_cursor_wm_info, latency_ns,
 			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 
 	if (single_plane_enabled(enabled) &&
 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
 
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &g4x_wm_info, latency_ns,
 			    &g4x_cursor_wm_info, latency_ns,
 			    &planea_wm, &cursora_wm))
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &g4x_wm_info, latency_ns,
 			    &g4x_cursor_wm_info, latency_ns,
 			    &planeb_wm, &cursorb_wm))
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 
 	if (single_plane_enabled(enabled) &&
 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &ironlake_display_wm_info,
 			    ILK_LP0_PLANE_LATENCY,
 			    &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &ironlake_display_wm_info,
 			    ILK_LP0_PLANE_LATENCY,
 			    &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
 	/*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
 	/*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 	unsigned int enabled;
 
 	enabled = 0;
-	if (g4x_compute_wm0(dev, 0,
+	if (g4x_compute_wm0(dev, PIPE_A,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
 			      " plane %d, " "cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 1;
+		enabled |= 1 << PIPE_A;
 	}
 
-	if (g4x_compute_wm0(dev, 1,
+	if (g4x_compute_wm0(dev, PIPE_B,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 2;
+		enabled |= 1 << PIPE_B;
 	}
 
-	if (g4x_compute_wm0(dev, 2,
+	if (g4x_compute_wm0(dev, PIPE_C,
 			    &sandybridge_display_wm_info, latency,
 			    &sandybridge_cursor_wm_info, latency,
 			    &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
 		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
 			      " plane %d, cursor: %d\n",
 			      plane_wm, cursor_wm);
-		enabled |= 3;
+		enabled |= 1 << PIPE_C;
 	}
 
 	/*
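Spelling the mask as 1 << PIPE_n is more than cosmetic: it fixes a real bug in the ivybridge path, where the pipe C branch did enabled |= 3. With PIPE_A = 0, PIPE_B = 1 and PIPE_C = 2, the arithmetic is:

    enabled |= 3;           /* 0b011: marks pipes A and B - wrong */
    enabled |= 1 << PIPE_C; /* 0b100: marks only pipe C - intended */

so single_plane_enabled(enabled) and ffs(enabled) - 1 now see the correct pipe.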
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f9889658329b..77b8a45fb10a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
 
 static inline void mga_wait_vsync(struct mga_device *mdev)
 {
-	unsigned int count = 0;
+	unsigned long timeout = jiffies + HZ/10;
 	unsigned int status = 0;
 
 	do {
 		status = RREG32(MGAREG_Status);
-		count++;
-	} while ((status & 0x08) && (count < 250000));
-	count = 0;
+	} while ((status & 0x08) && time_before(jiffies, timeout));
+	timeout = jiffies + HZ/10;
 	status = 0;
 	do {
 		status = RREG32(MGAREG_Status);
-		count++;
-	} while (!(status & 0x08) && (count < 250000));
+	} while (!(status & 0x08) && time_before(jiffies, timeout));
 }
 
 static inline void mga_wait_busy(struct mga_device *mdev)
 {
-	unsigned int count = 0;
+	unsigned long timeout = jiffies + HZ;
 	unsigned int status = 0;
 	do {
 		status = RREG8(MGAREG_Status + 2);
-		count++;
-	} while ((status & 0x01) && (count < 500000));
+	} while ((status & 0x01) && time_before(jiffies, timeout));
 }
 
 /*
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_REMHEADCTL_CLKDIS;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	/* select PLL Set C */
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_VREF_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~0x04;
-	WREG_DAC(MGA1064_VREF_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(50);
 
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
 	tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	/* reset dotclock rate bit */
 	WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	vcount = RREG8(MGAREG_VCOUNT);
 
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 
 	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
 	tmp = RREG8(DAC_DATA);
-	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+	WREG8(DAC_DATA, tmp & ~0x40);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
 	WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
 	tmp = RREG8(DAC_DATA);
-	WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+	WREG8(DAC_DATA, tmp | 0x40);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	return 0;
 }
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
 	tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	vcount = RREG8(MGAREG_VCOUNT);
 
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 	WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
-	WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
 	tmp = RREG8(DAC_DATA);
 	tmp |= MGA1064_REMHEADCTL_CLKDIS;
-	WREG_DAC(MGA1064_REMHEADCTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	tmp = RREG8(MGAREG_MEM_MISC_READ);
 	tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
 	tmp = RREG8(DAC_DATA);
 	tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
 	tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
-	WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+	WREG8(DAC_DATA, tmp);
 
 	udelay(500);
 
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
 	WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
 }
 
-
+/*
+   This is how the framebuffer base address is stored in g200 cards:
+   * Assume @offset is the gpu_addr variable of the framebuffer object
+   * Then addr is the number of _pixels_ (not bytes) from the start of
+     VRAM to the first pixel we want to display. (divided by 2 for 32bit
+     framebuffers)
+   * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+     addr<20> -> CRTCEXT0<6>
+     addr<19-16> -> CRTCEXT0<3-0>
+     addr<15-8> -> CRTCC<7-0>
+     addr<7-0> -> CRTCD<7-0>
+   CRTCEXT0 has to be programmed last to trigger an update and make the
+   new addr variable take effect.
+ */
 void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 {
 	struct mga_device *mdev = crtc->dev->dev_private;
 	u32 addr;
 	int count;
+	u8 crtcext0;
 
 	while (RREG8(0x1fda) & 0x08);
 	while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
 	count = RREG8(MGAREG_VCOUNT) + 2;
 	while (RREG8(MGAREG_VCOUNT) < count);
 
-	addr = offset >> 2;
+	WREG8(MGAREG_CRTCEXT_INDEX, 0);
+	crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
+	crtcext0 &= 0xB0;
+	addr = offset / 8;
+	/* Can't store addresses any higher than that...
+	   but we also don't have more than 16MB of memory, so it should be fine. */
+	WARN_ON(addr > 0x1fffff);
+	crtcext0 |= (!!(addr & (1<<20)))<<6;
 	WREG_CRT(0x0d, (u8)(addr & 0xff));
 	WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
-	WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+	WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
 }
 
 
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
 
 
 	for (i = 0; i < sizeof(dacvalue); i++) {
-		if ((i <= 0x03) ||
-		    (i == 0x07) ||
-		    (i == 0x0b) ||
-		    (i == 0x0f) ||
-		    ((i >= 0x13) && (i <= 0x17)) ||
+		if ((i <= 0x17) ||
 		    (i == 0x1b) ||
 		    (i == 0x1c) ||
 		    ((i >= 0x1f) && (i <= 0x29)) ||
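Two recurring fixes in this file deserve a note. First, the wait loops are now bounded by wall-clock time rather than iteration counts, so the timeout no longer scales with CPU and register-read speed; the jiffies idiom is:

    unsigned long timeout = jiffies + HZ/10;        /* roughly 100 ms from now */

    do {
            status = RREG32(MGAREG_Status);
    } while ((status & 0x08) && time_before(jiffies, timeout));

Second, every WREG_DAC(reg, tmp) that followed a WREG8(DAC_INDEX, ...)/RREG8(DAC_DATA) read-modify sequence became WREG8(DAC_DATA, tmp): WREG_DAC writes the index register before the data register, and several call sites even passed a bit constant such as MGA1064_PIX_CLK_CTL_CLK_DIS as the index, so the value landed in the wrong register; writing the already-selected data port avoids both problems. For the new start-address packing, a worked value: with addr = 0x1A2B3C, CRTCD gets 0x3C (bits 7-0), CRTCC gets 0x2B (bits 15-8), CRTCEXT0<3-0> gets 0xA (bits 19-16) and CRTCEXT0<6> gets bit 20, here 1; CRTCEXT0 is written last so the new base latches.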
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 08b0823c93d5..f86771481317 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -277,7 +277,7 @@ out_unref:
 	return 0;
 }
 
-static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
+static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
 {
 	int irq_num;
 	long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
 
 	mutex_lock(&qdev->async_io_mutex);
 	irq_num = atomic_read(&qdev->irq_received_io_cmd);
-
-
 	if (qdev->last_sent_io_cmd > irq_num) {
-		ret = wait_event_interruptible(qdev->io_cmd_event,
-			atomic_read(&qdev->irq_received_io_cmd) > irq_num);
-		if (ret)
+		if (intr)
+			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+				atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		else
+			ret = wait_event_timeout(qdev->io_cmd_event,
+				atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+		/* 0 is timeout, just bail the "hw" has gone away */
+		if (ret <= 0)
 			goto out;
 		irq_num = atomic_read(&qdev->irq_received_io_cmd);
 	}
 	outb(val, addr);
 	qdev->last_sent_io_cmd = irq_num + 1;
-	ret = wait_event_interruptible(qdev->io_cmd_event,
-		atomic_read(&qdev->irq_received_io_cmd) > irq_num);
+	if (intr)
+		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
+			atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
+	else
+		ret = wait_event_timeout(qdev->io_cmd_event,
			atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
 out:
+	if (ret > 0)
+		ret = 0;
 	mutex_unlock(&qdev->async_io_mutex);
 	return ret;
 }
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
 	int ret;
 
 restart:
-	ret = wait_for_io_cmd_user(qdev, val, port);
+	ret = wait_for_io_cmd_user(qdev, val, port, false);
 	if (ret == -ERESTARTSYS)
 		goto restart;
 }
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
 	mutex_lock(&qdev->update_area_mutex);
 	qdev->ram_header->update_area = *area;
 	qdev->ram_header->update_surface = surface_id;
-	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
+	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
 	mutex_unlock(&qdev->update_area_mutex);
 	return ret;
 }
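Switching wait primitives also switches return conventions, which is what the new tail of wait_for_io_cmd_user() normalizes away: wait_event_timeout() returns 0 when the timeout elapsed with the condition still false and a positive number of jiffies left when the condition became true, and the interruptible variant can additionally return -ERESTARTSYS. Since callers only want 0 or a negative errno, the shape is:

    long ret;

    ret = wait_event_interruptible_timeout(wq, cond, 5*HZ);
    /*  > 0: condition met, jiffies remaining
     *    0: timed out, condition still false
     *  < 0: -ERESTARTSYS (interruptible variant only) */
    if (ret > 0)
            ret = 0;        /* leftover jiffies are not an error */

(wq and cond are illustrative stand-ins for the qxl wait queue and its wake condition.)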
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index fcfd4436ceed..823d29e926ec 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
 	int inc = 1;
 
 	qobj = gem_to_qxl_bo(qxl_fb->obj);
-	if (qxl_fb != qdev->active_user_framebuffer) {
-		DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n",
-			__func__, qxl_fb, qdev->active_user_framebuffer);
-	}
+	/* if we aren't primary surface ignore this */
+	if (!qobj->is_primary)
+		return 0;
+
 	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
@@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 			  mode->hdisplay,
 			  mode->vdisplay);
 	}
-	qdev->mode_set = true;
 	return 0;
 }
 
@@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 {
 	struct drm_gem_object *obj;
 	struct qxl_framebuffer *qxl_fb;
-	struct qxl_device *qdev = dev->dev_private;
 	int ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
 		return NULL;
 	}
 
-	if (qdev->active_user_framebuffer) {
-		DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
-			 __func__,
-			 qdev->active_user_framebuffer, qxl_fb);
-	}
-	qdev->active_user_framebuffer = qxl_fb;
-
 	return &qxl_fb->base;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 52b582c211da..43d06ab28a21 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -255,12 +255,6 @@ struct qxl_device {
 	struct qxl_gem		gem;
 	struct qxl_mode_info mode_info;
 
-	/*
-	 * last created framebuffer with fb_create
-	 * only used by debugfs dumbppm
-	 */
-	struct qxl_framebuffer *active_user_framebuffer;
-
 	struct fb_info			*fbdev_info;
 	struct qxl_framebuffer	*fbdev_qfb;
 	void				 *ram_physical;
@@ -270,7 +264,6 @@ struct qxl_device {
 	struct qxl_ring		*cursor_ring;
 
 	struct qxl_ram_header *ram_header;
-	bool mode_set;
 
 	bool primary_created;
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 04b64f9cbfdb..6db7370373ea 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 		goto out;
 
 	if (!qobj->pin_count) {
+		qxl_ttm_placement_from_domain(qobj, qobj->type);
 		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
 				      true, false);
 		if (unlikely(ret))
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 865e2c9980db..60170ea5e3a2 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
 	OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
 
 	for (i = 0; i < nr; ++i) {
-		if (DRM_COPY_FROM_USER_UNCHECKED
+		if (DRM_COPY_FROM_USER
 		    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
 			DRM_ERROR("copy cliprect faulted\n");
 			return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d33f484ace48..094e7e5ea39e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
 #endif
 
 int radeon_no_wb;
-int radeon_modeset = 1;
+int radeon_modeset = -1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
 int radeon_agpmode = 0;
@@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
 
 static int __init radeon_init(void)
 {
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && radeon_modeset == -1) {
+		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
+		radeon_modeset = 0;
+	}
+#endif
+	/* set to modesetting by default if not nomodeset */
+	if (radeon_modeset == -1)
+		radeon_modeset = 1;
+
 	if (radeon_modeset == 1) {
 		DRM_INFO("radeon kernel modesetting enabled.\n");
 		driver = &kms_driver;
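Defaulting radeon_modeset to -1 makes the option a tri-state: an explicit radeon.modeset=0/1 from the user always wins, and only the unset case is demoted when the VGA console was forced (vgacon_text_force() is true when the user booted with nomodeset). The pattern reduced to its shape, with invented names (mydrv_init, modeset):

    static int modeset = -1;        /* -1 auto, 0 off, 1 on */

    static int __init mydrv_init(void)
    {
    #ifdef CONFIG_VGA_CONSOLE
            if (vgacon_text_force() && modeset == -1)
                    modeset = 0;    /* honor nomodeset only if the user made no choice */
    #endif
            if (modeset == -1)
                    modeset = 1;    /* default: kernel modesetting on */
            return 0;
    }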
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 699187ab3800..5b9ac32801c7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
 			kill_guest(&lg->cpus[0],
 				   "Cannot populate switcher mapping");
 		}
+		lg->pgdirs[pgdir].last_host_cpu = -1;
 	}
 }
 
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 375c109607ff..f4f3038c1df0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct variant_data *variant = host->variant;
 	u32 pwr = 0;
 	unsigned long flags;
+	int ret;
 
 	pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		break;
 	case MMC_POWER_ON:
 		if (!IS_ERR(mmc->supply.vqmmc) &&
-		    !regulator_is_enabled(mmc->supply.vqmmc))
-			regulator_enable(mmc->supply.vqmmc);
+		    !regulator_is_enabled(mmc->supply.vqmmc)) {
+			ret = regulator_enable(mmc->supply.vqmmc);
+			if (ret < 0)
+				dev_err(mmc_dev(mmc),
+					"failed to enable vqmmc regulator\n");
+		}
 
 		pwr |= MCI_PWR_ON;
 		break;
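regulator_enable() is declared __must_check because bringing a supply up can fail, for instance when the regulator is constrained or its parent is off; the hunk captures the result and logs the failure instead of ignoring it. The general shape (supply and dev are illustrative):

    if (!IS_ERR(supply)) {
            ret = regulator_enable(supply);         /* __must_check */
            if (ret < 0)
                    dev_err(dev, "failed to enable regulator: %d\n", ret);
    }

The driver still proceeds to set MCI_PWR_ON either way, matching the previous behavior.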
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 7ffc756131a2..547098086773 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -43,7 +43,7 @@ config CAIF_HSI
 
 config CAIF_VIRTIO
 	tristate "CAIF virtio transport driver"
-	depends on CAIF
+	depends on CAIF && HAS_DMA
 	select VHOST_RING
 	select VIRTIO
 	select GENERIC_ALLOCATOR
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index de570a8f8967..072c6f14e8fc 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -632,7 +632,6 @@ struct vortex_private {
 		pm_state_valid:1,	/* pci_dev->saved_config_space has sane contents */
 		open:1,
 		medialock:1,
-		must_free_region:1,	/* Flag: if zero, Cardbus owns the I/O region */
 		large_frames:1,		/* accept large frames */
 		handling_irq:1;		/* private in_irq indicator */
 	/* {get|set}_wol operations are already serialized by rtnl.
@@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
 	if (rc < 0)
 		goto out;

+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc < 0) {
+		pci_disable_device(pdev);
+		goto out;
+	}
+
 	unit = vortex_cards_found;

 	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
@@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
 	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
 		ioaddr = pci_iomap(pdev, 0, 0);
 	if (!ioaddr) {
+		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		rc = -ENOMEM;
 		goto out;
@@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
 			   ent->driver_data, unit);
 	if (rc < 0) {
 		pci_iounmap(pdev, ioaddr);
+		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		goto out;
 	}
@@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,

 	/* PCI-only startup logic */
 	if (pdev) {
-		/* EISA resources already marked, so only PCI needs to do this here */
-		/* Ignore return value, because Cardbus drivers already allocate for us */
-		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
-			vp->must_free_region = 1;
-
 		/* enable bus-mastering if necessary */
 		if (vci->flags & PCI_USES_MASTER)
 			pci_set_master(pdev);
@@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 					   &vp->rx_ring_dma);
 	retval = -ENOMEM;
 	if (!vp->rx_ring)
-		goto free_region;
+		goto free_device;

 	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
 	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1484,9 +1486,7 @@ free_ring:
 			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
 			    vp->rx_ring,
 			    vp->rx_ring_dma);
-free_region:
-	if (vp->must_free_region)
-		release_region(dev->base_addr, vci->io_size);
+free_device:
 	free_netdev(dev);
 	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
 out:
@@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
 			    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
 			    vp->rx_ring,
 			    vp->rx_ring_dma);
-	if (vp->must_free_region)
-		release_region(dev->base_addr, vp->io_size);
+
+	pci_release_regions(pdev);
+
 	free_netdev(dev);
 }

diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ce4a030d3d0c..07f7ef05c3f2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad,

 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
-
-	if (!bnad->work_q)
+	if (!bnad->work_q) {
+		iounmap(bnad->bar0);
 		return -ENOMEM;
+	}

 	return 0;
 }
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1194446f859a..768285ec10f4 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE

 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on GENERIC_HARDIRQS
+	depends on GENERIC_HARDIRQS && HAS_DMA
 	select NET_CORE
 	select MACB
 	---help---
@@ -31,6 +31,7 @@ config ARM_AT91_ETHER

 config MACB
 	tristate "Cadence MACB/GEM support"
+	depends on HAS_DMA
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index aba435c3d4ae..184a063bed5f 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
 	tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	select CRC32
 	help
 	  This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index aff0310a778b..ca9825ca88c9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -87,6 +87,8 @@
 #define FEC_QUIRK_HAS_GBIT		(1 << 3)
 /* Controller has extend desc buffer */
 #define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM		(1 << 5)

 static struct platform_device_id fec_devtype[] = {
 	{
@@ -105,7 +107,7 @@ static struct platform_device_id fec_devtype[] = {
 	}, {
 		.name = "imx6q-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
-				FEC_QUIRK_HAS_BUFDESC_EX,
+				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
 	}, {
 		.name = "mvf-fec",
 		.driver_data = FEC_QUIRK_ENET_MAC,
@@ -1744,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *cbd_base;

 	/* Allocate memory for buffer descriptors. */
@@ -1775,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev)
 	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
 	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);

-	/* enable hw accelerator */
-	ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-			| NETIF_F_RXCSUM);
-	ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
-			| NETIF_F_RXCSUM);
-	fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+	if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+		/* enable hw accelerator */
+		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				| NETIF_F_RXCSUM);
+		ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				| NETIF_F_RXCSUM);
+		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+	}

 	fec_restart(ndev, 0);

diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4989481c19f0..d300a0c0eafc 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev)
 	}

 #ifdef CONFIG_PPC_DCR_NATIVE
-	/* Enable internal clock source */
-	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-		dcri_clrset(SDR0, SDR0_ETH_CFG,
-			    0, SDR0_ETH_CFG_ECS << dev->cell_index);
+	/*
+	 * PPC460EX/GT Embedded Processor Advanced User's Manual
+	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+	 * Note: The PHY must provide a TX Clk in order to perform a soft reset
+	 * of the EMAC. If none is present, select the internal clock
+	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+	 * After a soft reset, select the external clock.
+	 */
+	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+		if (dev->phy_address == 0xffffffff &&
+		    dev->phy_map == 0xffffffff) {
+			/* No PHY: select internal loop clock before reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
+		} else {
+			/* PHY present: select external clock before reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+		}
+	}
 #endif

 	out_be32(&p->mr0, EMAC_MR0_SRST);
@@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev)
 		--n;

 #ifdef CONFIG_PPC_DCR_NATIVE
-	/* Enable external clock source */
-	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX))
-		dcri_clrset(SDR0, SDR0_ETH_CFG,
-			    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+		if (dev->phy_address == 0xffffffff &&
+		    dev->phy_map == 0xffffffff) {
+			/* No PHY: restore external clock source after reset */
+			dcri_clrset(SDR0, SDR0_ETH_CFG,
+				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
+		}
+	}
 #endif

 	if (n) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 91f2b2c43c12..d3f508697a3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
 	if (user_prio >= 0) {
 		context->pri_path.sched_queue |= user_prio << 3;
-		context->pri_path.feup = 1 << 6;
+		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
 	}
 	context->pri_path.counter_index = 0xff;
 	context->cqn_send = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index b147bdd40768..58a8e535d698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
131 [2] = "RSS XOR Hash Function support", 131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device manage flow steering support", 132 [3] = "Device manage flow steering support",
133 [4] = "Automatic MAC reassignment support", 133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support" 134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support"
135 }; 137 };
136 int i; 138 int i;
137 139
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index e12e0d2e0ee0..1157f028a90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (MLX4_QP_ST_RC == qp_type)
 			return -EINVAL;

+		/* force strip vlan by clear vsd */
+		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		if (0 != vp_oper->state.default_vlan) {
+			qpc->pri_path.vlan_control =
+				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+		} else { /* priority tagged */
+			qpc->pri_path.vlan_control =
+				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
+		}
+
+		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
-		qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/
-		qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
+		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
 		qpc->pri_path.sched_queue &= 0xC7;
 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
-		mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
-			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-			 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
-			 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
-			 (int)(qpc->pri_path.fl));
 	}
 	if (vp_oper->state.spoofchk) {
-		qpc->pri_path.feup |= 1 << 5; /* set fsm bit */;
+		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
-		mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
-			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
-			 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
-			 vp_oper->mac_idx);
 	}
 	return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 90c253b145ef..019c5f78732e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -429,6 +429,7 @@ struct qlcnic_hardware_context {

 	u16 port_type;
 	u16 board_type;
+	u16 supported_type;

 	u16 link_speed;
 	u16 link_duplex;
@@ -1514,6 +1515,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
 void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);

 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
 int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index ea790a93ee7c..b4ff1e35a11d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
 	return 1;
 }

-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
 {
 	u32 data;
-	unsigned long wait_time = 0;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	/* wait for mailbox completion */
 	do {
 		data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
-		if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+		if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
 			data = QLCNIC_RCODE_TIMEOUT;
 			break;
 		}
@@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 	u16 opcode;
 	u8 mbx_err_code;
 	unsigned long flags;
-	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;

 	opcode = LSW(cmd->req.arg[0]);
 	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
@@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
 	/* Signal FW about the impending command */
 	QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
 poll:
-	rsp = qlcnic_83xx_mbx_poll(adapter);
+	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 	if (rsp != QLCNIC_RCODE_TIMEOUT) {
 		/* Get the FW response data */
 		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
 			__qlcnic_83xx_process_aen(adapter);
-			mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-			if (mbx_val)
-				goto poll;
+			goto poll;
 		}
 		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -1276,11 +1273,13 @@ out:
 	return err;
 }

-static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+				      int num_sds_ring)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_host_rds_ring *rds_ring;
+	u16 adapter_state = adapter->is_up;
 	u8 ring;
 	int ret;

@@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
 	ret = qlcnic_fw_create_ctx(adapter);
 	if (ret) {
 		qlcnic_detach(adapter);
+		if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+			adapter->max_sds_rings = num_sds_ring;
+			qlcnic_attach(adapter);
+		}
 		netif_device_attach(netdev);
 		return ret;
 	}
@@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
 	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;

-	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+					 max_sds_rings);
 	if (ret)
 		goto fail_diag_alloc;

@@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
 			break;
 		}
 		config = cmd.rsp.arg[3];
+		if (QLC_83XX_SFP_PRESENT(config)) {
+			switch (ahw->module_type) {
+			case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+			case LINKEVENT_MODULE_OPTICAL_SRLR:
+			case LINKEVENT_MODULE_OPTICAL_LRM:
+			case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+				ahw->supported_type = PORT_FIBRE;
+				break;
+			case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+			case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+			case LINKEVENT_MODULE_TWINAX:
+				ahw->supported_type = PORT_TP;
+				break;
+			default:
+				ahw->supported_type = PORT_OTHER;
+			}
+		}
 		if (config & 1)
 			err = 1;
 	}
@@ -2838,7 +2859,8 @@ out:
 	return config;
 }

-int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
+			     struct ethtool_cmd *ecmd)
 {
 	u32 config = 0;
 	int status = 0;
@@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
 	ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
 	/* hard code until there is a way to get it from flash */
 	ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+	if (netif_running(adapter->netdev) && ahw->has_link_events) {
+		ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+		ecmd->duplex = ahw->link_duplex;
+		ecmd->autoneg = ahw->link_autoneg;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+		ecmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	if (ahw->port_type == QLCNIC_XGBE) {
+		ecmd->supported = SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_1000baseT_Full;
+	} else {
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+				   SUPPORTED_10baseT_Full |
+				   SUPPORTED_100baseT_Half |
+				   SUPPORTED_100baseT_Full |
+				   SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+		ecmd->advertising = (ADVERTISED_100baseT_Half |
+				     ADVERTISED_100baseT_Full |
+				     ADVERTISED_1000baseT_Half |
+				     ADVERTISED_1000baseT_Full);
+	}
+
+	switch (ahw->supported_type) {
+	case PORT_FIBRE:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_FIBRE;
+		ecmd->transceiver = XCVR_EXTERNAL;
+		break;
+	case PORT_TP:
+		ecmd->supported |= SUPPORTED_TP;
+		ecmd->advertising |= ADVERTISED_TP;
+		ecmd->port = PORT_TP;
+		ecmd->transceiver = XCVR_INTERNAL;
+		break;
+	default:
+		ecmd->supported |= SUPPORTED_FIBRE;
+		ecmd->advertising |= ADVERTISED_FIBRE;
+		ecmd->port = PORT_OTHER;
+		ecmd->transceiver = XCVR_EXTERNAL;
+		break;
+	}
+	ecmd->phy_address = ahw->physical_port;
 	return status;
 }

@@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EIO;

-	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+	ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+					 max_sds_rings);
 	if (ret)
 		goto fail_diag_irq;

diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 1f1d85e6f2af..f5db67fc9f55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);

 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
-int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
 				struct ethtool_pauseparam *);
@@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
 int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
 int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
 u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
 void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
 void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ab1d8d99cbd5..c67d1eb35e8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -435,10 +435,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
 	}
 done:
 	netif_device_attach(netdev);
-	if (netif_running(netdev)) {
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
-	}
 }

 static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
@@ -642,15 +638,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)

 static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
 	qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
 	qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
 	set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
-	adapter->ahw->idc.quiesce_req = 0;
-	adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
-	adapter->ahw->idc.err_code = 0;
-	adapter->ahw->idc.collect_dump = 0;
+
+	ahw->idc.quiesce_req = 0;
+	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+	ahw->idc.err_code = 0;
+	ahw->idc.collect_dump = 0;
+	ahw->reset_context = 0;
+	adapter->tx_timeo_cnt = 0;
+
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 }

 /**
@@ -851,6 +853,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
 	/* Check for soft reset request */
 	if (ahw->reset_context &&
 	    !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+		adapter->ahw->reset_context = 0;
 		qlcnic_83xx_idc_tx_soft_reset(adapter);
 		return ret;
 	}
@@ -914,6 +917,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
 static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
 {
 	dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+	clear_bit(__QLCNIC_RESETTING, &adapter->state);
 	adapter->ahw->idc.err_code = -EIO;

 	return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 08efb4635007..f67652de5a63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
131 "ctx_lro_pkt_cnt", 131 "ctx_lro_pkt_cnt",
132 "ctx_ip_csum_error", 132 "ctx_ip_csum_error",
133 "ctx_rx_pkts_wo_ctx", 133 "ctx_rx_pkts_wo_ctx",
134 "ctx_rx_pkts_dropped_wo_sts", 134 "ctx_rx_pkts_drop_wo_sds_on_card",
135 "ctx_rx_pkts_drop_wo_sds_on_host",
135 "ctx_rx_osized_pkts", 136 "ctx_rx_osized_pkts",
136 "ctx_rx_pkts_dropped_wo_rds", 137 "ctx_rx_pkts_dropped_wo_rds",
137 "ctx_rx_unexpected_mcast_pkts", 138 "ctx_rx_unexpected_mcast_pkts",
138 "ctx_invalid_mac_address", 139 "ctx_invalid_mac_address",
139 "ctx_rx_rds_ring_prim_attemoted", 140 "ctx_rx_rds_ring_prim_attempted",
140 "ctx_rx_rds_ring_prim_success", 141 "ctx_rx_rds_ring_prim_success",
141 "ctx_num_lro_flows_added", 142 "ctx_num_lro_flows_added",
142 "ctx_num_lro_flows_removed", 143 "ctx_num_lro_flows_removed",
@@ -251,6 +252,18 @@ static int
 qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+	if (qlcnic_82xx_check(adapter))
+		return qlcnic_82xx_get_settings(adapter, ecmd);
+	else if (qlcnic_83xx_check(adapter))
+		return qlcnic_83xx_get_settings(adapter, ecmd);
+
+	return -EIO;
+}
+
+int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
+			     struct ethtool_cmd *ecmd)
+{
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	u32 speed, reg;
 	int check_sfp_module = 0;
@@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)

 	} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
 		u32 val = 0;
-		if (qlcnic_83xx_check(adapter))
-			qlcnic_83xx_get_settings(adapter);
-		else
-			val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+		val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);

 		if (val == QLCNIC_PORT_MODE_802_3_AP) {
 			ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 			ecmd->advertising = ADVERTISED_10000baseT_Full;
 		}

-		if (netif_running(dev) && adapter->ahw->has_link_events) {
-			if (qlcnic_82xx_check(adapter)) {
-				reg = QLCRD32(adapter,
-					      P3P_LINK_SPEED_REG(pcifn));
-				speed = P3P_LINK_SPEED_VAL(pcifn, reg);
-				ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
-			}
-			ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
-			ecmd->autoneg = adapter->ahw->link_autoneg;
-			ecmd->duplex = adapter->ahw->link_duplex;
+		if (netif_running(adapter->netdev) && ahw->has_link_events) {
+			reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
+			speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+			ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+			ethtool_cmd_speed_set(ecmd, ahw->link_speed);
+			ecmd->autoneg = ahw->link_autoneg;
+			ecmd->duplex = ahw->link_duplex;
 			goto skip;
 		}
304 311
@@ -340,8 +347,8 @@ skip:
 	case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
 		ecmd->advertising |= ADVERTISED_TP;
 		ecmd->supported |= SUPPORTED_TP;
-		check_sfp_module = netif_running(dev) &&
-			adapter->ahw->has_link_events;
+		check_sfp_module = netif_running(adapter->netdev) &&
+				   ahw->has_link_events;
 	case QLCNIC_BRDTYPE_P3P_10G_XFP:
 		ecmd->supported |= SUPPORTED_FIBRE;
 		ecmd->advertising |= ADVERTISED_FIBRE;
@@ -355,8 +362,8 @@ skip:
 			ecmd->advertising |=
 				(ADVERTISED_FIBRE | ADVERTISED_TP);
 			ecmd->port = PORT_FIBRE;
-			check_sfp_module = netif_running(dev) &&
-				adapter->ahw->has_link_events;
+			check_sfp_module = netif_running(adapter->netdev) &&
+					   ahw->has_link_events;
 		} else {
 			ecmd->autoneg = AUTONEG_ENABLE;
 			ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -365,13 +372,6 @@ skip:
 			ecmd->port = PORT_TP;
 		}
 		break;
-	case QLCNIC_BRDTYPE_83XX_10G:
-		ecmd->autoneg = AUTONEG_DISABLE;
-		ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
-		ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
-		ecmd->port = PORT_FIBRE;
-		check_sfp_module = netif_running(dev) && ahw->has_link_events;
-		break;
 	default:
 		dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
 			adapter->ahw->board_type);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 95b1b5732838..b6818f4356b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata {

 #define QLCNIC_SET_OWNER	1
 #define QLCNIC_CLR_OWNER	0
-#define QLCNIC_MBX_TIMEOUT	10000
+#define QLCNIC_MBX_TIMEOUT	5000

 #define QLCNIC_MBX_RSP_OK	1
 #define QLCNIC_MBX_PORT_RSP_OK	0x1a
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 264d5a4f8153..8fb836d4129f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn,
37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); 37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
38 38
39int qlcnic_use_msi = 1; 39int qlcnic_use_msi = 1;
40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
41module_param_named(use_msi, qlcnic_use_msi, int, 0444); 41module_param_named(use_msi, qlcnic_use_msi, int, 0444);
42 42
43int qlcnic_use_msi_x = 1; 43int qlcnic_use_msi_x = 1;
44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); 45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
46 46
47int qlcnic_auto_fw_reset = 1; 47int qlcnic_auto_fw_reset = 1;
48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); 49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
50 50
51int qlcnic_load_fw_file; 51int qlcnic_load_fw_file;
52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); 53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
54 54
55int qlcnic_config_npars; 55int qlcnic_config_npars;
56module_param(qlcnic_config_npars, int, 0444); 56module_param(qlcnic_config_npars, int, 0444);
57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
58 58
59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
60static void qlcnic_remove(struct pci_dev *pdev); 60static void qlcnic_remove(struct pci_dev *pdev);
@@ -308,6 +308,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
 	return 0;
 }

+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_mac_list_s *cur;
+	struct list_head *head;
+
+	list_for_each(head, &adapter->mac_list) {
+		cur = list_entry(head, struct qlcnic_mac_list_s, list);
+		if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
+			qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+						  0, QLCNIC_MAC_DEL);
+			list_del(&cur->list);
+			kfree(cur);
+			return;
+		}
+	}
+}
+
 static int qlcnic_set_mac(struct net_device *netdev, void *p)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -322,11 +339,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EINVAL;

+	if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
+		return 0;
+
 	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 		netif_device_detach(netdev);
 		qlcnic_napi_disable(adapter);
 	}

+	qlcnic_delete_adapter_mac(adapter);
 	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 	qlcnic_set_multi(adapter->netdev);
@@ -2481,12 +2502,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return;

-	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
-
-	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
-		adapter->need_fw_reset = 1;
-	else
+	if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
+		netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+		if (qlcnic_82xx_check(adapter))
+			adapter->need_fw_reset = 1;
+		else if (qlcnic_83xx_check(adapter))
+			qlcnic_83xx_idc_request_reset(adapter,
+						      QLCNIC_FORCE_FW_DUMP_KEY);
+	} else {
+		netdev_info(netdev, "Tx timeout, reset adapter context.\n");
 		adapter->ahw->reset_context = 1;
+	}
 }

 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 44d547d78b84..3869c3864deb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 				    u32 *pay, u8 pci_func, u8 size)
 {
+	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	unsigned long flags;
-	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
 	u16 opcode;
 	u8 mbx_err_code;
 	int i, j;
@@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
 	 * assume something is wrong.
 	 */
 poll:
-	rsp = qlcnic_83xx_mbx_poll(adapter);
+	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
 	if (rsp != QLCNIC_RCODE_TIMEOUT) {
 		/* Get the FW response data */
 		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
 		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
 			__qlcnic_83xx_process_aen(adapter);
-			mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-			if (mbx_val)
-				goto poll;
+			goto poll;
 		}
 		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
 		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index c81be2da119b..1a66ccded235 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
 	if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
 		return -EINVAL;

-	if (!(cmd->req.arg[1] & BIT_8))
-		return -EINVAL;
-
 	return 0;
 }

diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 87463bc701a6..50235d201592 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
 	if (pci_dma_mapping_error(qdev->pdev, map)) {
 		__free_pages(rx_ring->pg_chunk.page,
 			     qdev->lbq_buf_order);
+		rx_ring->pg_chunk.page = NULL;
 		netif_err(qdev, drv, qdev->ndev,
 			  "PCI mapping failed.\n");
 		return -ENOMEM;
@@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 			curr_idx = 0;

 	}
+	if (rx_ring->pg_chunk.page) {
+		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
+			       ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+		put_page(rx_ring->pg_chunk.page);
+		rx_ring->pg_chunk.page = NULL;
+	}
 }

 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f695a50bac47..43c1f3223322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,6 +1,6 @@
 config STMMAC_ETH
 	tristate "STMicroelectronics 10/100/1000 Ethernet driver"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	select NET_CORE
 	select MII
 	select PHYLIB
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d5a141c7c4e7..1c502bb0c916 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	}

 	if (port->passthru)
-		vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+		vlan = list_first_or_null_rcu(&port->vlans,
+					      struct macvlan_dev, list);
 	else
 		vlan = macvlan_hash_lookup(port, eth->h_dest);
 	if (vlan == NULL)
@@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	if (err < 0)
 		goto upper_dev_unlink;

-	list_add_tail(&vlan->list, &port->vlans);
+	list_add_tail_rcu(&vlan->list, &port->vlans);
 	netif_stacked_transfer_operstate(lowerdev, dev);

 	return 0;
@@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);

-	list_del(&vlan->list);
+	list_del_rcu(&vlan->list);
 	unregister_netdevice_queue(dev, head);
 	netdev_upper_dev_unlink(vlan->lowerdev, dev);
 }
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd76fbd..f3cdf64997d6 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
 	if (dev == NULL)
 		return;

+	list_del(&dev->list);
+
 	ndev = dev->ndev;

 	unregister_netdev(ndev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3c23fdc27bf0..655bb25eed2b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -28,7 +28,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>

-static int napi_weight = 128;
+static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);

 static bool csum = true, gso = true;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9b20d9ee2719..7f702fe3ecc2 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
 	int i;
 	bool needreset = false;

+	if (!test_bit(ATH_STAT_STARTED, ah->status))
+		return;
+
 	mutex_lock(&ah->lock);

 	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
@@ -2676,6 +2679,7 @@ done:
 	mmiowb();
 	mutex_unlock(&ah->lock);

+	set_bit(ATH_STAT_STARTED, ah->status);
 	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
 			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

@@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw)

 	ath5k_stop_tasklets(ah);

+	clear_bit(ATH_STAT_STARTED, ah->status);
 	cancel_delayed_work_sync(&ah->tx_complete_work);

 	if (!ath5k_modparam_no_hw_rfkill_switch)
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 17507dc8a1e7..f3dc124c60c7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT

 config ATH9K
 	tristate "Atheros 802.11n wireless cards support"
-	depends on MAC80211
+	depends on MAC80211 && HAS_DMA
 	select ATH9K_HW
 	select MAC80211_LEDS
 	select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 0c2ac0c6dc89..e85a8b076c22 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
 	{0x00009d10, 0x01834061},
 	{0x00009d14, 0x00c00400},
 	{0x00009d18, 0x00000000},
-	{0x00009e08, 0x0078230c},
-	{0x00009e24, 0x990bb515},
-	{0x00009e28, 0x126f0000},
+	{0x00009e08, 0x0038230c},
+	{0x00009e24, 0x9907b515},
+	{0x00009e28, 0x126f0600},
 	{0x00009e30, 0x06336f77},
 	{0x00009e34, 0x6af6532f},
 	{0x00009e38, 0x0cc80c00},
@@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = {

 static const u32 ar9565_1p0_baseband_postamble[][5] = {
 	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
-	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d},
+	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009},
 	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
 	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
 	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
@@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
 	{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
 	{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
 	{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
-	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
-	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
-	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+	{0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020},
+	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
 	{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
 	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
@@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = {

 static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	/* Addr      allmodes  */
+	{0x00004050, 0x00300300},
+	{0x0000406c, 0x00100000},
 	{0x0000a000, 0x00010000},
 	{0x0000a004, 0x00030002},
 	{0x0000a008, 0x00050004},
@@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	{0x0000a0b4, 0x00000000},
 	{0x0000a0b8, 0x00000000},
 	{0x0000a0bc, 0x00000000},
-	{0x0000a0c0, 0x001f0000},
-	{0x0000a0c4, 0x01000101},
-	{0x0000a0c8, 0x011e011f},
-	{0x0000a0cc, 0x011c011d},
-	{0x0000a0d0, 0x02030204},
-	{0x0000a0d4, 0x02010202},
-	{0x0000a0d8, 0x021f0200},
-	{0x0000a0dc, 0x0302021e},
-	{0x0000a0e0, 0x03000301},
-	{0x0000a0e4, 0x031e031f},
-	{0x0000a0e8, 0x0402031d},
-	{0x0000a0ec, 0x04000401},
-	{0x0000a0f0, 0x041e041f},
-	{0x0000a0f4, 0x0502041d},
-	{0x0000a0f8, 0x05000501},
-	{0x0000a0fc, 0x051e051f},
-	{0x0000a100, 0x06010602},
-	{0x0000a104, 0x061f0600},
-	{0x0000a108, 0x061d061e},
-	{0x0000a10c, 0x07020703},
-	{0x0000a110, 0x07000701},
+	{0x0000a0c0, 0x00bf00a0},
+	{0x0000a0c4, 0x11a011a1},
+	{0x0000a0c8, 0x11be11bf},
+	{0x0000a0cc, 0x11bc11bd},
+	{0x0000a0d0, 0x22632264},
+	{0x0000a0d4, 0x22612262},
+	{0x0000a0d8, 0x227f2260},
+	{0x0000a0dc, 0x4322227e},
+	{0x0000a0e0, 0x43204321},
+	{0x0000a0e4, 0x433e433f},
+	{0x0000a0e8, 0x4462433d},
+	{0x0000a0ec, 0x44604461},
+	{0x0000a0f0, 0x447e447f},
+	{0x0000a0f4, 0x5582447d},
+	{0x0000a0f8, 0x55805581},
+	{0x0000a0fc, 0x559e559f},
+	{0x0000a100, 0x66816682},
+	{0x0000a104, 0x669f6680},
+	{0x0000a108, 0x669d669e},
+	{0x0000a10c, 0x77627763},
+	{0x0000a110, 0x77607761},
 	{0x0000a114, 0x00000000},
 	{0x0000a118, 0x00000000},
 	{0x0000a11c, 0x00000000},
@@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
 	{0x0000a134, 0x00000000},
 	{0x0000a138, 0x00000000},
 	{0x0000a13c, 0x00000000},
-	{0x0000a140, 0x001f0000},
-	{0x0000a144, 0x01000101},
-	{0x0000a148, 0x011e011f},
-	{0x0000a14c, 0x011c011d},
-	{0x0000a150, 0x02030204},
-	{0x0000a154, 0x02010202},
-	{0x0000a158, 0x021f0200},
-	{0x0000a15c, 0x0302021e},
-	{0x0000a160, 0x03000301},
-	{0x0000a164, 0x031e031f},
-	{0x0000a168, 0x0402031d},
-	{0x0000a16c, 0x04000401},
-	{0x0000a170, 0x041e041f},
-	{0x0000a174, 0x0502041d},
-	{0x0000a178, 0x05000501},
-	{0x0000a17c, 0x051e051f},
-	{0x0000a180, 0x06010602},
-	{0x0000a184, 0x061f0600},
-	{0x0000a188, 0x061d061e},
-	{0x0000a18c, 0x07020703},
-	{0x0000a190, 0x07000701},
+	{0x0000a140, 0x00bf00a0},
+	{0x0000a144, 0x11a011a1},
+	{0x0000a148, 0x11be11bf},
+	{0x0000a14c, 0x11bc11bd},
+	{0x0000a150, 0x22632264},
+	{0x0000a154, 0x22612262},
+	{0x0000a158, 0x227f2260},
+	{0x0000a15c, 0x4322227e},
+	{0x0000a160, 0x43204321},
+	{0x0000a164, 0x433e433f},
+	{0x0000a168, 0x4462433d},
+	{0x0000a16c, 0x44604461},
+	{0x0000a170, 0x447e447f},
+	{0x0000a174, 0x5582447d},
+	{0x0000a178, 0x55805581},
+	{0x0000a17c, 0x559e559f},
+	{0x0000a180, 0x66816682},
+	{0x0000a184, 0x669f6680},
+	{0x0000a188, 0x669d669e},
+	{0x0000a18c, 0x77e677e7},
+	{0x0000a190, 0x77e477e5},
 	{0x0000a194, 0x00000000},
 	{0x0000a198, 0x00000000},
 	{0x0000a19c, 0x00000000},
@@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
770 772
771static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { 773static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
772 /* Addr allmodes */ 774 /* Addr allmodes */
773 {0x00018c00, 0x18213ede}, 775 {0x00018c00, 0x18212ede},
774 {0x00018c04, 0x000801d8}, 776 {0x00018c04, 0x000801d8},
775 {0x00018c08, 0x0003780c}, 777 {0x00018c08, 0x0003780c},
776}; 778};
@@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
889 {0x0000a180, 0x66816682}, 891 {0x0000a180, 0x66816682},
890 {0x0000a184, 0x669f6680}, 892 {0x0000a184, 0x669f6680},
891 {0x0000a188, 0x669d669e}, 893 {0x0000a188, 0x669d669e},
892 {0x0000a18c, 0x77627763}, 894 {0x0000a18c, 0x77e677e7},
893 {0x0000a190, 0x77607761}, 895 {0x0000a190, 0x77e477e5},
894 {0x0000a194, 0x00000000}, 896 {0x0000a194, 0x00000000},
895 {0x0000a198, 0x00000000}, 897 {0x0000a198, 0x00000000},
896 {0x0000a19c, 0x00000000}, 898 {0x0000a19c, 0x00000000},
@@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1114 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, 1116 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1115 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, 1117 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1116 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, 1118 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1117 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1119 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
1118 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 1120 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1119 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 1121 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1120 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, 1122 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
@@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1140 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, 1142 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
1141 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, 1143 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
1142 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, 1144 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
1143 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1145 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0},
1144 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1146 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1},
1145 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1147 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2},
1146 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1148 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3},
1147 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1149 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4},
1148 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1150 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
1149 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1151 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
1150 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1152 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1153 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1152 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1154 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1174 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, 1176 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1175 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, 1177 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1176 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, 1178 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1179 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
1178 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 1180 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1179 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 1181 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1180 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, 1182 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
@@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1200 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 1202 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
1201 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 1203 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
1202 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 1204 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
1203 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1205 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0},
1204 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1206 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1},
1205 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1207 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2},
1206 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1208 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3},
1207 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1209 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4},
1208 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1210 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
1209 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1211 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
1210 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1212 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1211 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1213 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1212 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1214 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6963862a1872..a18414b5948b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
227 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) 227 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
228 goto work; 228 goto work;
229 229
230 ath9k_set_beacon(sc);
231
232 if (ah->opmode == NL80211_IFTYPE_STATION && 230 if (ah->opmode == NL80211_IFTYPE_STATION &&
233 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 231 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
234 spin_lock_irqsave(&sc->sc_pm_lock, flags); 232 spin_lock_irqsave(&sc->sc_pm_lock, flags);
235 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 233 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
236 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 234 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
235 } else {
236 ath9k_set_beacon(sc);
237 } 237 }
238 work: 238 work:
239 ath_restart_work(sc); 239 ath_restart_work(sc);
@@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1332 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1332 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1333 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1333 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1334 struct ieee80211_key_conf ps_key = { }; 1334 struct ieee80211_key_conf ps_key = { };
1335 int key;
1335 1336
1336 ath_node_attach(sc, sta, vif); 1337 ath_node_attach(sc, sta, vif);
1337 1338
@@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1339 vif->type != NL80211_IFTYPE_AP_VLAN) 1340 vif->type != NL80211_IFTYPE_AP_VLAN)
1340 return 0; 1341 return 0;
1341 1342
1342 an->ps_key = ath_key_config(common, vif, sta, &ps_key); 1343 key = ath_key_config(common, vif, sta, &ps_key);
1344 if (key > 0)
1345 an->ps_key = key;
1343 1346
1344 return 0; 1347 return 0;
1345} 1348}
@@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1356 return; 1359 return;
1357 1360
1358 ath_key_delete(common, &ps_key); 1361 ath_key_delete(common, &ps_key);
1362 an->ps_key = 0;
1359} 1363}
1360 1364
1361static int ath9k_sta_remove(struct ieee80211_hw *hw, 1365static int ath9k_sta_remove(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 523355b87659..f7c70b3a6ea9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1728,6 +1728,25 @@ drop_recycle_buffer:
1728 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); 1728 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1729} 1729}
1730 1730
1731void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
1732{
1733 int current_slot, previous_slot;
1734
1735 B43_WARN_ON(ring->tx);
1736
1737 /* Device has filled all buffers, drop all packets and let TCP
1738 * decrease speed.
 1739	 * Decrementing the RX index by one lets the device see all slots
 1740	 * as free again.
1741 */
1742 /*
 1743	 * TODO: How to increase rx_drop in mac80211?
1744 */
1745 current_slot = ring->ops->get_current_rxslot(ring);
1746 previous_slot = prev_slot(ring, current_slot);
1747 ring->ops->set_current_rxslot(ring, previous_slot);
1748}
1749
1731void b43_dma_rx(struct b43_dmaring *ring) 1750void b43_dma_rx(struct b43_dmaring *ring)
1732{ 1751{
1733 const struct b43_dma_ops *ops = ring->ops; 1752 const struct b43_dma_ops *ops = ring->ops;
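The rewind trick above is compact enough to misread, so here is a hedged, standalone C sketch of it (the slot count and helper are illustrative, not the driver's): on overflow the RX index is stepped back one slot, with wraparound, which the hardware takes as "every descriptor is free"; the backlog is simply dropped and TCP throttles itself.

#include <stdio.h>

#define NR_SLOTS 64	/* hypothetical ring size */

static int prev_slot(int slot)
{
	return slot == 0 ? NR_SLOTS - 1 : slot - 1;
}

int main(void)
{
	int current_slot = 0;	/* device has caught up with the driver */
	int rewound = prev_slot(current_slot);

	/* Writing the rewound index back makes every slot look free;
	 * the packets sitting in the ring are dropped wholesale. */
	printf("rewind slot %d -> %d\n", current_slot, rewound);
	return 0;
}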
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 9fdd1983079c..df8c8cdcbdb5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -9,7 +9,7 @@
9/* DMA-Interrupt reasons. */ 9/* DMA-Interrupt reasons. */
10#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ 10#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
11 | (1 << 14) | (1 << 15)) 11 | (1 << 14) | (1 << 15))
12#define B43_DMAIRQ_NONFATALMASK (1 << 13) 12#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
13#define B43_DMAIRQ_RX_DONE (1 << 16) 13#define B43_DMAIRQ_RX_DONE (1 << 16)
14 14
15/*** 32-bit DMA Engine. ***/ 15/*** 32-bit DMA Engine. ***/
@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
295void b43_dma_handle_txstatus(struct b43_wldev *dev, 295void b43_dma_handle_txstatus(struct b43_wldev *dev,
296 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
297 297
298void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
299
298void b43_dma_rx(struct b43_dmaring *ring); 300void b43_dma_rx(struct b43_dmaring *ring);
299 301
300void b43_dma_direct_fifo_rx(struct b43_wldev *dev, 302void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d377f77d30b5..6dd07e2ec595 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1902 } 1902 }
1903 } 1903 }
1904 1904
1905 if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | 1905 if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
1906 B43_DMAIRQ_NONFATALMASK))) { 1906 b43err(dev->wl,
1907 if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { 1907 "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
1908 b43err(dev->wl, "Fatal DMA error: " 1908 dma_reason[0], dma_reason[1],
1909 "0x%08X, 0x%08X, 0x%08X, " 1909 dma_reason[2], dma_reason[3],
1910 "0x%08X, 0x%08X, 0x%08X\n", 1910 dma_reason[4], dma_reason[5]);
1911 dma_reason[0], dma_reason[1], 1911 b43err(dev->wl, "This device does not support DMA "
1912 dma_reason[2], dma_reason[3],
1913 dma_reason[4], dma_reason[5]);
1914 b43err(dev->wl, "This device does not support DMA "
1915 "on your system. It will now be switched to PIO.\n"); 1912 "on your system. It will now be switched to PIO.\n");
1916 /* Fall back to PIO transfers if we get fatal DMA errors! */ 1913 /* Fall back to PIO transfers if we get fatal DMA errors! */
1917 dev->use_pio = true; 1914 dev->use_pio = true;
1918 b43_controller_restart(dev, "DMA error"); 1915 b43_controller_restart(dev, "DMA error");
1919 return; 1916 return;
1920 }
1921 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
1922 b43err(dev->wl, "DMA error: "
1923 "0x%08X, 0x%08X, 0x%08X, "
1924 "0x%08X, 0x%08X, 0x%08X\n",
1925 dma_reason[0], dma_reason[1],
1926 dma_reason[2], dma_reason[3],
1927 dma_reason[4], dma_reason[5]);
1928 }
1929 } 1917 }
1930 1918
1931 if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) 1919 if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
@@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1944 handle_irq_noise(dev); 1932 handle_irq_noise(dev);
1945 1933
1946 /* Check the DMA reason registers for received data. */ 1934 /* Check the DMA reason registers for received data. */
1935 if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
1936 if (B43_DEBUG)
1937 b43warn(dev->wl, "RX descriptor underrun\n");
1938 b43_dma_handle_rx_overflow(dev->dma.rx_ring);
1939 }
1947 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { 1940 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
1948 if (b43_using_pio_transfers(dev)) 1941 if (b43_using_pio_transfers(dev))
1949 b43_pio_rx(dev->pio.rx_queue); 1942 b43_pio_rx(dev->pio.rx_queue);
@@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
2001 return IRQ_NONE; 1994 return IRQ_NONE;
2002 1995
2003 dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) 1996 dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
2004 & 0x0001DC00; 1997 & 0x0001FC00;
2005 dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) 1998 dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
2006 & 0x0000DC00; 1999 & 0x0000DC00;
2007 dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) 2000 dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
@@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev)
3130 b43_write32(dev, 0x018C, 0x02000000); 3123 b43_write32(dev, 0x018C, 0x02000000);
3131 } 3124 }
3132 b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); 3125 b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
3133 b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); 3126 b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
3134 b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); 3127 b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
3135 b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); 3128 b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
3136 b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); 3129 b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
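For reference, a small sketch of what the mask change amounts to: the DMA0 interrupt mask grows bit 13, the receive-descriptor underflow that b43_do_interrupt_thread() now routes to b43_dma_handle_rx_overflow(). The constants mirror the hunk; the program only demonstrates the arithmetic.

#include <stdio.h>

#define B43_DMAIRQ_RDESC_UFLOW (1u << 13)

int main(void)
{
	unsigned int old_mask = 0x0001DC00;
	unsigned int new_mask = old_mask | B43_DMAIRQ_RDESC_UFLOW;

	printf("%#010x\n", new_mask);	/* prints 0x0001fc00 */
	return 0;
}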
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b8f82e688c72..9a95045c97b6 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5741 hw->flags = 5741 hw->flags =
5742 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | 5742 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
5743 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | 5743 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
5744 IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | 5744 IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
5745 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
5746 if (il->cfg->sku & IL_SKU_N) 5745 if (il->cfg->sku & IL_SKU_N)
5747 hw->flags |= 5746 hw->flags |=
5748 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 5747 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index d3c8ece980d8..e42b266a023a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2234 if (wdev->netdev->reg_state == NETREG_REGISTERED) 2234 if (wdev->netdev->reg_state == NETREG_REGISTERED)
2235 unregister_netdevice(wdev->netdev); 2235 unregister_netdevice(wdev->netdev);
2236 2236
2237 if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
2238 free_netdev(wdev->netdev);
2239
2240 /* Clear the priv in adapter */ 2237 /* Clear the priv in adapter */
2241 priv->netdev = NULL; 2238 priv->netdev = NULL;
2242 2239
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 74db0d24a579..26755d9acb55 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1191 adapter->if_ops.wakeup(adapter); 1191 adapter->if_ops.wakeup(adapter);
1192 adapter->hs_activated = false; 1192 adapter->hs_activated = false;
1193 adapter->is_hs_configured = false; 1193 adapter->is_hs_configured = false;
1194 adapter->is_suspended = false;
1194 mwifiex_hs_activated_event(mwifiex_get_priv(adapter, 1195 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1195 MWIFIEX_BSS_ROLE_ANY), 1196 MWIFIEX_BSS_ROLE_ANY),
1196 false); 1197 false);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 121443a0f2a1..2eb88ea9acf7 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
655 struct net_device *dev) 655 struct net_device *dev)
656{ 656{
657 dev->netdev_ops = &mwifiex_netdev_ops; 657 dev->netdev_ops = &mwifiex_netdev_ops;
658 dev->destructor = free_netdev;
658 /* Initialize private structure */ 659 /* Initialize private structure */
659 priv->current_key_index = 0; 660 priv->current_key_index = 0;
660 priv->media_connected = false; 661 priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 311d0b26b81c..1a8a19dbd635 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
96 } else { 96 } else {
97 /* Multicast */ 97 /* Multicast */
98 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; 98 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
99 if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { 99 if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
100 dev_dbg(priv->adapter->dev, 100 dev_dbg(priv->adapter->dev,
101 "info: Enabling All Multicast!\n"); 101 "info: Enabling All Multicast!\n");
102 priv->curr_pkt_filter |= 102 priv->curr_pkt_filter |=
@@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
108 dev_dbg(priv->adapter->dev, 108 dev_dbg(priv->adapter->dev,
109 "info: Set multicast list=%d\n", 109 "info: Set multicast list=%d\n",
110 mcast_list->num_multicast_addr); 110 mcast_list->num_multicast_addr);
111 /* Set multicast addresses to firmware */ 111 /* Send multicast addresses to firmware */
112 if (old_pkt_filter == priv->curr_pkt_filter) { 112 ret = mwifiex_send_cmd_async(priv,
113 /* Send request to firmware */ 113 HostCmd_CMD_MAC_MULTICAST_ADR,
114 ret = mwifiex_send_cmd_async(priv, 114 HostCmd_ACT_GEN_SET, 0,
115 HostCmd_CMD_MAC_MULTICAST_ADR, 115 mcast_list);
116 HostCmd_ACT_GEN_SET, 0,
117 mcast_list);
118 } else {
119 /* Send request to firmware */
120 ret = mwifiex_send_cmd_async(priv,
121 HostCmd_CMD_MAC_MULTICAST_ADR,
122 HostCmd_ACT_GEN_SET, 0,
123 mcast_list);
124 }
125 } 116 }
126 } 117 }
127 } 118 }
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c92356..2dacd19e1b8a 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
345 */ 345 */
346void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) 346void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
347{ 347{
348 if (mw > NTB_NUM_MW) 348 if (mw >= NTB_NUM_MW)
349 return NULL; 349 return NULL;
350 350
351 return ndev->mw[mw].vbase; 351 return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
362 */ 362 */
363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) 363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
364{ 364{
365 if (mw > NTB_NUM_MW) 365 if (mw >= NTB_NUM_MW)
366 return 0; 366 return 0;
367 367
368 return ndev->mw[mw].bar_sz; 368 return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
380 */ 380 */
381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) 381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
382{ 382{
383 if (mw > NTB_NUM_MW) 383 if (mw >= NTB_NUM_MW)
384 return; 384 return;
385 385
386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, 386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1027 ndev->mw[i].vbase = 1027 ndev->mw[i].vbase =
1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), 1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
1029 ndev->mw[i].bar_sz); 1029 ndev->mw[i].bar_sz);
1030 dev_info(&pdev->dev, "MW %d size %d\n", i, 1030 dev_info(&pdev->dev, "MW %d size %llu\n", i,
1031 (u32) pci_resource_len(pdev, MW_TO_BAR(i))); 1031 pci_resource_len(pdev, MW_TO_BAR(i)));
1032 if (!ndev->mw[i].vbase) { 1032 if (!ndev->mw[i].vbase) {
1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n", 1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1034 MW_TO_BAR(i)); 1034 MW_TO_BAR(i));
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7f9930..f8d7081ee301 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
58#include <linux/ntb.h> 58#include <linux/ntb.h>
59#include "ntb_hw.h" 59#include "ntb_hw.h"
60 60
61#define NTB_TRANSPORT_VERSION 2 61#define NTB_TRANSPORT_VERSION 3
62 62
63static unsigned int transport_mtu = 0x401E; 63static unsigned int transport_mtu = 0x401E;
64module_param(transport_mtu, uint, 0644); 64module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
173 173
174enum { 174enum {
175 VERSION = 0, 175 VERSION = 0,
176 MW0_SZ,
177 MW1_SZ,
178 NUM_QPS,
179 QP_LINKS, 176 QP_LINKS,
177 NUM_QPS,
178 NUM_MWS,
179 MW0_SZ_HIGH,
180 MW0_SZ_LOW,
181 MW1_SZ_HIGH,
182 MW1_SZ_LOW,
180 MAX_SPAD, 183 MAX_SPAD,
181}; 184};
182 185
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
297{ 300{
298 struct ntb_transport_client_dev *client_dev; 301 struct ntb_transport_client_dev *client_dev;
299 struct ntb_transport *nt; 302 struct ntb_transport *nt;
300 int rc; 303 int rc, i = 0;
301 304
302 if (list_empty(&ntb_transport_list)) 305 if (list_empty(&ntb_transport_list))
303 return -ENODEV; 306 return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
315 dev = &client_dev->dev; 318 dev = &client_dev->dev;
316 319
317 /* setup and register client devices */ 320 /* setup and register client devices */
318 dev_set_name(dev, "%s", device_name); 321 dev_set_name(dev, "%s%d", device_name, i);
319 dev->bus = &ntb_bus_type; 322 dev->bus = &ntb_bus_type;
320 dev->release = ntb_client_release; 323 dev->release = ntb_client_release;
321 dev->parent = &ntb_query_pdev(nt->ndev)->dev; 324 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
327 } 330 }
328 331
329 list_add_tail(&client_dev->entry, &nt->client_devs); 332 list_add_tail(&client_dev->entry, &nt->client_devs);
333 i++;
330 } 334 }
331 335
332 return 0; 336 return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
486 (qp_num / NTB_NUM_MW * rx_size); 490 (qp_num / NTB_NUM_MW * rx_size);
487 rx_size -= sizeof(struct ntb_rx_info); 491 rx_size -= sizeof(struct ntb_rx_info);
488 492
489 qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); 493 qp->rx_buff = qp->remote_rx_info + 1;
490 qp->rx_max_frame = min(transport_mtu, rx_size); 494 /* Due to housekeeping, there must be atleast 2 buffs */
 490	 qp->rx_max_frame = min(transport_mtu, rx_size); 494	 /* Due to housekeeping, there must be at least 2 buffers */
491 qp->rx_max_entry = rx_size / qp->rx_max_frame; 496 qp->rx_max_entry = rx_size / qp->rx_max_frame;
492 qp->rx_index = 0; 497 qp->rx_index = 0;
493 498
494 qp->remote_rx_info->entry = qp->rx_max_entry; 499 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
495 500
496 /* setup the hdr offsets with 0's */ 501 /* setup the hdr offsets with 0's */
497 for (i = 0; i < qp->rx_max_entry; i++) { 502 for (i = 0; i < qp->rx_max_entry; i++) {
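A worked example of the sizing rule introduced above may help; the window size here is hypothetical, and transport_mtu is the module default from this file. Halving the usable window guarantees at least two frames, and one entry is withheld from the peer for housekeeping.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int transport_mtu = 0x401E;	/* module default in this file */
	unsigned int rx_size = 0x8000;		/* hypothetical per-QP window */

	unsigned int rx_max_frame = MIN(transport_mtu, rx_size / 2);
	unsigned int rx_max_entry = rx_size / rx_max_frame;

	/* The peer is advertised one entry fewer than actually exists. */
	printf("frame=%u entries=%u advertised=%u\n",
	       rx_max_frame, rx_max_entry, rx_max_entry - 1);
	return 0;
}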
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
502 507
503 qp->rx_pkts = 0; 508 qp->rx_pkts = 0;
504 qp->tx_pkts = 0; 509 qp->tx_pkts = 0;
510 qp->tx_index = 0;
511}
512
513static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
514{
515 struct ntb_transport_mw *mw = &nt->mw[num_mw];
516 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
517
518 if (!mw->virt_addr)
519 return;
520
521 dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
522 mw->virt_addr = NULL;
505} 523}
506 524
507static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) 525static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
509 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 527 struct ntb_transport_mw *mw = &nt->mw[num_mw];
510 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 528 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
511 529
530 /* No need to re-setup */
531 if (mw->size == ALIGN(size, 4096))
532 return 0;
533
534 if (mw->size != 0)
535 ntb_free_mw(nt, num_mw);
536
512 /* Alloc memory for receiving data. Must be 4k aligned */ 537 /* Alloc memory for receiving data. Must be 4k aligned */
513 mw->size = ALIGN(size, 4096); 538 mw->size = ALIGN(size, 4096);
514 539
515 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, 540 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
516 GFP_KERNEL); 541 GFP_KERNEL);
517 if (!mw->virt_addr) { 542 if (!mw->virt_addr) {
543 mw->size = 0;
518 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", 544 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
519 (int) mw->size); 545 (int) mw->size);
520 return -ENOMEM; 546 return -ENOMEM;
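Sketch of the re-setup guard added above, with malloc standing in for dma_alloc_coherent(): keep the buffer when the page-aligned size is unchanged, free before resizing, and zero the cached size on failure so a retry does not skip the allocation. This is an illustration, not the driver code.

#include <stdlib.h>

#define ALIGN_4K(x) (((x) + 4095) & ~(size_t)4095)

struct mw { void *virt_addr; size_t size; };

static int set_mw(struct mw *mw, size_t size)
{
	if (mw->size == ALIGN_4K(size))
		return 0;		/* already mapped at this size */

	free(mw->virt_addr);		/* ntb_free_mw() stand-in; free(NULL) is a no-op */
	mw->size = ALIGN_4K(size);
	mw->virt_addr = malloc(mw->size);
	if (!mw->virt_addr) {
		mw->size = 0;		/* so a later retry re-enters the alloc path */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct mw mw = { 0 };

	return set_mw(&mw, 8192);
}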
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
604 u32 val; 630 u32 val;
605 int rc, i; 631 int rc, i;
606 632
607 /* send the local info */ 633 /* send the local info, in the opposite order of the way we read it */
608 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); 634 for (i = 0; i < NTB_NUM_MW; i++) {
609 if (rc) { 635 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
610 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 636 ntb_get_mw_size(ndev, i) >> 32);
611 0, VERSION); 637 if (rc) {
612 goto out; 638 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
613 } 639 (u32)(ntb_get_mw_size(ndev, i) >> 32),
640 MW0_SZ_HIGH + (i * 2));
641 goto out;
642 }
614 643
615 rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); 644 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
616 if (rc) { 645 (u32) ntb_get_mw_size(ndev, i));
617 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 646 if (rc) {
618 (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); 647 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
619 goto out; 648 (u32) ntb_get_mw_size(ndev, i),
649 MW0_SZ_LOW + (i * 2));
650 goto out;
651 }
620 } 652 }
621 653
622 rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); 654 rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
623 if (rc) { 655 if (rc) {
624 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 656 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
625 (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); 657 NTB_NUM_MW, NUM_MWS);
626 goto out; 658 goto out;
627 } 659 }
628 660
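The scratchpad protocol above splits each 64-bit window size across two 32-bit scratchpad registers, high word first; a minimal userspace sketch (the array stands in for MW0_SZ_HIGH/MW0_SZ_LOW) shows the round trip.

#include <stdio.h>
#include <stdint.h>

static uint32_t spad[2];	/* stand-ins for MW0_SZ_HIGH / MW0_SZ_LOW */

int main(void)
{
	uint64_t mw_size = 0x1A0000000ULL;	/* hypothetical >4GB window */

	spad[0] = (uint32_t)(mw_size >> 32);	/* written first: MW0_SZ_HIGH */
	spad[1] = (uint32_t)mw_size;		/* MW0_SZ_LOW */

	uint64_t readback = ((uint64_t)spad[0] << 32) | spad[1];

	printf("sent %#llx, read back %#llx\n",
	       (unsigned long long)mw_size, (unsigned long long)readback);
	return 0;
}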
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
633 goto out; 665 goto out;
634 } 666 }
635 667
636 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); 668 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
637 if (rc) {
638 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
639 goto out;
640 }
641
642 rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
643 if (rc) { 669 if (rc) {
644 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 670 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
645 val, QP_LINKS); 671 NTB_TRANSPORT_VERSION, VERSION);
646 goto out; 672 goto out;
647 } 673 }
648 674
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
667 goto out; 693 goto out;
668 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); 694 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
669 695
670 rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); 696 rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
671 if (rc) { 697 if (rc) {
672 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); 698 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
673 goto out; 699 goto out;
674 } 700 }
675 701
676 if (!val) 702 if (val != NTB_NUM_MW)
677 goto out; 703 goto out;
678 dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); 704 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
679 705
680 rc = ntb_set_mw(nt, 0, val); 706 for (i = 0; i < NTB_NUM_MW; i++) {
681 if (rc) 707 u64 val64;
682 goto out;
683 708
684 rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); 709 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
685 if (rc) { 710 if (rc) {
686 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); 711 dev_err(&pdev->dev, "Error reading remote spad %d\n",
687 goto out; 712 MW0_SZ_HIGH + (i * 2));
688 } 713 goto out1;
714 }
689 715
690 if (!val) 716 val64 = (u64) val << 32;
691 goto out;
692 dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
693 717
694 rc = ntb_set_mw(nt, 1, val); 718 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
695 if (rc) 719 if (rc) {
696 goto out; 720 dev_err(&pdev->dev, "Error reading remote spad %d\n",
721 MW0_SZ_LOW + (i * 2));
722 goto out1;
723 }
724
725 val64 |= val;
726
727 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
728
729 rc = ntb_set_mw(nt, i, val64);
730 if (rc)
731 goto out1;
732 }
697 733
698 nt->transport_link = NTB_LINK_UP; 734 nt->transport_link = NTB_LINK_UP;
699 735
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
708 744
709 return; 745 return;
710 746
747out1:
748 for (i = 0; i < NTB_NUM_MW; i++)
749 ntb_free_mw(nt, i);
711out: 750out:
712 if (ntb_hw_link_status(ndev)) 751 if (ntb_hw_link_status(ndev))
713 schedule_delayed_work(&nt->link_work, 752 schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
780 (qp_num / NTB_NUM_MW * tx_size); 819 (qp_num / NTB_NUM_MW * tx_size);
781 tx_size -= sizeof(struct ntb_rx_info); 820 tx_size -= sizeof(struct ntb_rx_info);
782 821
783 qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); 822 qp->tx_mw = qp->rx_info + 1;
784 qp->tx_max_frame = min(transport_mtu, tx_size); 823 /* Due to housekeeping, there must be atleast 2 buffs */
 784	 qp->tx_max_frame = min(transport_mtu, tx_size); 823	 /* Due to housekeeping, there must be at least 2 buffers */
785 qp->tx_max_entry = tx_size / qp->tx_max_frame; 825 qp->tx_max_entry = tx_size / qp->tx_max_frame;
786 qp->tx_index = 0;
787 826
788 if (nt->debugfs_dir) { 827 if (nt->debugfs_dir) {
789 char debugfs_name[4]; 828 char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
897 pdev = ntb_query_pdev(nt->ndev); 936 pdev = ntb_query_pdev(nt->ndev);
898 937
899 for (i = 0; i < NTB_NUM_MW; i++) 938 for (i = 0; i < NTB_NUM_MW; i++)
900 if (nt->mw[i].virt_addr) 939 ntb_free_mw(nt, i);
901 dma_free_coherent(&pdev->dev, nt->mw[i].size,
902 nt->mw[i].virt_addr,
903 nt->mw[i].dma_addr);
904 940
905 kfree(nt->qps); 941 kfree(nt->qps);
906 ntb_unregister_transport(nt->ndev); 942 ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
999static void ntb_transport_rx(unsigned long data) 1035static void ntb_transport_rx(unsigned long data)
1000{ 1036{
1001 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1037 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1002 int rc; 1038 int rc, i;
1003 1039
1004 do { 1040 /* Limit the number of packets processed in a single interrupt to
1041 * provide fairness to others
1042 */
1043 for (i = 0; i < qp->rx_max_entry; i++) {
1005 rc = ntb_process_rxc(qp); 1044 rc = ntb_process_rxc(qp);
1006 } while (!rc); 1045 if (rc)
1046 break;
1047 }
1007} 1048}
1008 1049
1009static void ntb_transport_rxc_db(void *data, int db_num) 1050static void ntb_transport_rxc_db(void *data, int db_num)
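The loop rewrite above swaps an unbounded do/while for a budgeted for loop; a toy sketch of the pattern (budget and backlog numbers are made up) shows leftover work being deferred to the next tasklet run rather than starving other consumers.

#include <stdio.h>

static int process_one(int *backlog)
{
	if (*backlog == 0)
		return -1;	/* ring empty, nothing to do */
	(*backlog)--;
	return 0;
}

int main(void)
{
	int backlog = 100;	/* made-up number of queued packets */
	int budget = 32;	/* plays the role of qp->rx_max_entry */
	int i;

	for (i = 0; i < budget; i++)
		if (process_one(&backlog))
			break;

	printf("deferred to the next run: %d\n", backlog);
	return 0;
}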
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1210 */ 1251 */
1211void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1252void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1212{ 1253{
1213 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1254 struct pci_dev *pdev;
1214 struct ntb_queue_entry *entry; 1255 struct ntb_queue_entry *entry;
1215 1256
1216 if (!qp) 1257 if (!qp)
1217 return; 1258 return;
1218 1259
1260 pdev = ntb_query_pdev(qp->ndev);
1261
1219 cancel_delayed_work_sync(&qp->link_work); 1262 cancel_delayed_work_sync(&qp->link_work);
1220 1263
1221 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1264 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1371 */ 1414 */
1372void ntb_transport_link_down(struct ntb_transport_qp *qp) 1415void ntb_transport_link_down(struct ntb_transport_qp *qp)
1373{ 1416{
1374 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1417 struct pci_dev *pdev;
1375 int rc, val; 1418 int rc, val;
1376 1419
1377 if (!qp) 1420 if (!qp)
1378 return; 1421 return;
1379 1422
1423 pdev = ntb_query_pdev(qp->ndev);
1380 qp->client_ready = NTB_LINK_DOWN; 1424 qp->client_ready = NTB_LINK_DOWN;
1381 1425
1382 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); 1426 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1408 */ 1452 */
1409bool ntb_transport_link_query(struct ntb_transport_qp *qp) 1453bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1410{ 1454{
1455 if (!qp)
1456 return false;
1457
1411 return qp->qp_link == NTB_LINK_UP; 1458 return qp->qp_link == NTB_LINK_UP;
1412} 1459}
1413EXPORT_SYMBOL_GPL(ntb_transport_link_query); 1460EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1422 */ 1469 */
1423unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 1470unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1424{ 1471{
1472 if (!qp)
1473 return 0;
1474
1425 return qp->qp_num; 1475 return qp->qp_num;
1426} 1476}
1427EXPORT_SYMBOL_GPL(ntb_transport_qp_num); 1477EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1436 */ 1486 */
1437unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 1487unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1438{ 1488{
1489 if (!qp)
1490 return 0;
1491
1439 return qp->tx_max_frame - sizeof(struct ntb_payload_header); 1492 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1440} 1493}
1441EXPORT_SYMBOL_GPL(ntb_transport_max_size); 1494EXPORT_SYMBOL_GPL(ntb_transport_max_size);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0c81915b1997..b9838130a7b0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,7 +20,6 @@ if RTC_CLASS
20config RTC_HCTOSYS 20config RTC_HCTOSYS
21 bool "Set system time from RTC on startup and resume" 21 bool "Set system time from RTC on startup and resume"
22 default y 22 default y
23 depends on !ALWAYS_USE_PERSISTENT_CLOCK
24 help 23 help
25 If you say yes here, the system time (wall clock) will be set using 24 If you say yes here, the system time (wall clock) will be set using
26 the value read from a specified RTC device. This is useful to avoid 25 the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
29config RTC_SYSTOHC 28config RTC_SYSTOHC
30 bool "Set the RTC time based on NTP synchronization" 29 bool "Set the RTC time based on NTP synchronization"
31 default y 30 default y
32 depends on !ALWAYS_USE_PERSISTENT_CLOCK
33 help 31 help
34 If you say yes here, the system time (wall clock) will be stored 32 If you say yes here, the system time (wall clock) will be stored
35 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 33 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 787bd2c22bca..380387a47b1d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
526 } 526 }
527 527
528 if (xfer->tx_buf) 528 if (xfer->tx_buf)
529 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); 529 if (xfer->bits_per_word > 8)
530 spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
531 else
532 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
530 else 533 else
531 spi_writel(as, TDR, 0); 534 spi_writel(as, TDR, 0);
532 535
533 dev_dbg(master->dev.parent, 536 dev_dbg(master->dev.parent,
534 " start pio xfer %p: len %u tx %p rx %p\n", 537 " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
535 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); 538 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
539 xfer->bits_per_word);
536 540
537 /* Enable relevant interrupts */ 541 /* Enable relevant interrupts */
538 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); 542 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
950{ 954{
951 u8 *txp; 955 u8 *txp;
952 u8 *rxp; 956 u8 *rxp;
957 u16 *txp16;
958 u16 *rxp16;
953 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 959 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
954 960
955 if (xfer->rx_buf) { 961 if (xfer->rx_buf) {
956 rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 962 if (xfer->bits_per_word > 8) {
957 *rxp = spi_readl(as, RDR); 963 rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
964 *rxp16 = spi_readl(as, RDR);
965 } else {
966 rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
967 *rxp = spi_readl(as, RDR);
968 }
958 } else { 969 } else {
959 spi_readl(as, RDR); 970 spi_readl(as, RDR);
960 } 971 }
961 972 if (xfer->bits_per_word > 8) {
962 as->current_remaining_bytes--; 973 as->current_remaining_bytes -= 2;
974 if (as->current_remaining_bytes < 0)
975 as->current_remaining_bytes = 0;
976 } else {
977 as->current_remaining_bytes--;
978 }
963 979
964 if (as->current_remaining_bytes) { 980 if (as->current_remaining_bytes) {
965 if (xfer->tx_buf) { 981 if (xfer->tx_buf) {
966 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; 982 if (xfer->bits_per_word > 8) {
967 spi_writel(as, TDR, *txp); 983 txp16 = (u16 *)(((u8 *)xfer->tx_buf)
984 + xfer_pos + 2);
985 spi_writel(as, TDR, *txp16);
986 } else {
987 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
988 spi_writel(as, TDR, *txp);
989 }
968 } else { 990 } else {
969 spi_writel(as, TDR, 0); 991 spi_writel(as, TDR, 0);
970 } 992 }
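A standalone sketch of the wide-word PIO rule above, under the assumption that bits_per_word is 16: the buffer is walked with 16-bit loads and the byte count drops by two per FIFO access. The driver additionally rejects odd lengths (see the %2 check later in this patch) and clamps the count at zero.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44 };
	int bits_per_word = 16;		/* assumed transfer setting */
	int remaining = (int)sizeof(buf);
	size_t pos = 0;

	while (remaining > 0) {
		uint16_t word;

		if (bits_per_word > 8) {
			memcpy(&word, buf + pos, 2); /* two bytes per FIFO access */
			pos += 2;
			remaining -= 2;	/* the driver clamps this at 0 */
		} else {
			word = buf[pos];
			pos += 1;
			remaining -= 1;
		}
		printf("TDR <- %#06x\n", (unsigned int)word); /* spi_writel stand-in */
	}
	return 0;
}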
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1378 } 1400 }
1379 } 1401 }
1380 1402
1403 if (xfer->bits_per_word > 8) {
1404 if (xfer->len % 2) {
 1405	 dev_dbg(&spi->dev, "buffer length must be 16-bit aligned\n");
1406 return -EINVAL;
1407 }
1408 }
1409
1381 /* FIXME implement these protocol options!! */ 1410 /* FIXME implement these protocol options!! */
1382 if (xfer->speed_hz) { 1411 if (xfer->speed_hz < spi->max_speed_hz) {
1383 dev_dbg(&spi->dev, "no protocol options yet\n"); 1412 dev_dbg(&spi->dev, "can't change speed in transfer\n");
1384 return -ENOPROTOOPT; 1413 return -ENOPROTOOPT;
1385 } 1414 }
1386 1415
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 2e8f24a1fb95..50b13c9b1ab6 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
784 }, 784 },
785 { }, 785 { },
786}; 786};
787MODULE_DEVICE_TABLE(of, davini_spi_of_match); 787MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
788 788
789/** 789/**
790 * spi_davinci_get_pdata - Get platform data from DTS binding 790 * spi_davinci_get_pdata - Get platform data from DTS binding
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 163fd802b7ac..32b7bb111eb6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
334 spi->dev.parent = &master->dev; 334 spi->dev.parent = &master->dev;
335 spi->dev.bus = &spi_bus_type; 335 spi->dev.bus = &spi_bus_type;
336 spi->dev.release = spidev_release; 336 spi->dev.release = spidev_release;
337 spi->cs_gpio = -EINVAL; 337 spi->cs_gpio = -ENOENT;
338 device_initialize(&spi->dev); 338 device_initialize(&spi->dev);
339 return spi; 339 return spi;
340} 340}
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
1067 nb = of_gpio_named_count(np, "cs-gpios"); 1067 nb = of_gpio_named_count(np, "cs-gpios");
1068 master->num_chipselect = max(nb, (int)master->num_chipselect); 1068 master->num_chipselect = max(nb, (int)master->num_chipselect);
1069 1069
1070 if (nb < 1) 1070 /* Return error only for an incorrectly formed cs-gpios property */
1071 if (nb == 0 || nb == -ENOENT)
1071 return 0; 1072 return 0;
1073 else if (nb < 0)
1074 return nb;
1072 1075
1073 cs = devm_kzalloc(&master->dev, 1076 cs = devm_kzalloc(&master->dev,
1074 sizeof(int) * master->num_chipselect, 1077 sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
1079 return -ENOMEM; 1082 return -ENOMEM;
1080 1083
1081 for (i = 0; i < master->num_chipselect; i++) 1084 for (i = 0; i < master->num_chipselect; i++)
1082 cs[i] = -EINVAL; 1085 cs[i] = -ENOENT;
1083 1086
1084 for (i = 0; i < nb; i++) 1087 for (i = 0; i < nb; i++)
1085 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1088 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
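The errno convention above distinguishes "property absent" from "property malformed"; a hedged sketch of the same decision (the helper name is invented for illustration): -ENOENT or a zero count means fall back to native chipselects, while any other negative count propagates as a real error.

#include <stdio.h>
#include <errno.h>

/* of_spi_check_cs() is an invented name for this illustration. */
static int of_spi_check_cs(int nb)
{
	if (nb == 0 || nb == -ENOENT)
		return 0;	/* no cs-gpios property: native chipselects only */
	else if (nb < 0)
		return nb;	/* malformed property: a real error, propagate it */
	return nb;		/* number of GPIO chipselects to register */
}

int main(void)
{
	printf("%d %d %d\n", of_spi_check_cs(-ENOENT),
	       of_spi_check_cs(-EINVAL), of_spi_check_cs(3));
	return 0;
}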
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index ffbc6a94be52..262ef1f23b38 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
1250 1250
1251static void iscsit_do_crypto_hash_buf( 1251static void iscsit_do_crypto_hash_buf(
1252 struct hash_desc *hash, 1252 struct hash_desc *hash,
1253 unsigned char *buf, 1253 const void *buf,
1254 u32 payload_length, 1254 u32 payload_length,
1255 u32 padding, 1255 u32 padding,
1256 u8 *pad_bytes, 1256 u8 *pad_bytes,
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
2524 if (conn->conn_ops->HeaderDigest) { 2524 if (conn->conn_ops->HeaderDigest) {
2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2526 2526
2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2528 (unsigned char *)hdr, ISCSI_HDR_LEN, 2528 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2529 0, NULL, (u8 *)header_digest);
2530 2529
2531 cmd->tx_size += ISCSI_CRC_LEN; 2530 cmd->tx_size += ISCSI_CRC_LEN;
2532 pr_debug("Attaching CRC32C HeaderDigest to" 2531 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2662 if (conn->conn_ops->HeaderDigest) { 2661 if (conn->conn_ops->HeaderDigest) {
2663 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2662 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2664 2663
2665 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2664 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
2666 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 2665 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2667 0, NULL, (u8 *)header_digest);
2668 2666
2669 iov[0].iov_len += ISCSI_CRC_LEN; 2667 iov[0].iov_len += ISCSI_CRC_LEN;
2670 tx_size += ISCSI_CRC_LEN; 2668 tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2841 if (conn->conn_ops->HeaderDigest) { 2839 if (conn->conn_ops->HeaderDigest) {
2842 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2840 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2843 2841
2844 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2842 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
2845 (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, 2843 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2846 0, NULL, (u8 *)header_digest);
2847 2844
2848 iov[0].iov_len += ISCSI_CRC_LEN; 2845 iov[0].iov_len += ISCSI_CRC_LEN;
2849 tx_size += ISCSI_CRC_LEN; 2846 tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
2900 if (conn->conn_ops->HeaderDigest) { 2897 if (conn->conn_ops->HeaderDigest) {
2901 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2898 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2902 2899
2903 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2900 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2904 (unsigned char *)hdr, ISCSI_HDR_LEN, 2901 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2905 0, NULL, (u8 *)header_digest);
2906 2902
2907 tx_size += ISCSI_CRC_LEN; 2903 tx_size += ISCSI_CRC_LEN;
2908 pr_debug("Attaching CRC32C HeaderDigest to" 2904 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2949 if (conn->conn_ops->HeaderDigest) { 2945 if (conn->conn_ops->HeaderDigest) {
2950 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2946 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2951 2947
2952 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2948 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2953 (unsigned char *)hdr, ISCSI_HDR_LEN, 2949 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2954 0, NULL, (u8 *)header_digest);
2955 2950
2956 iov[0].iov_len += ISCSI_CRC_LEN; 2951 iov[0].iov_len += ISCSI_CRC_LEN;
2957 tx_size += ISCSI_CRC_LEN; 2952 tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
3040 if (conn->conn_ops->HeaderDigest) { 3035 if (conn->conn_ops->HeaderDigest) {
3041 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3036 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3042 3037
3043 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3038 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3044 (unsigned char *)hdr, ISCSI_HDR_LEN, 3039 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3045 0, NULL, (u8 *)header_digest);
3046 3040
3047 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3041 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3048 tx_size += ISCSI_CRC_LEN; 3042 tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3256 if (conn->conn_ops->HeaderDigest) { 3250 if (conn->conn_ops->HeaderDigest) {
3257 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3251 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3258 3252
3259 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3253 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
3260 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 3254 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3261 0, NULL, (u8 *)header_digest);
3262 3255
3263 iov[0].iov_len += ISCSI_CRC_LEN; 3256 iov[0].iov_len += ISCSI_CRC_LEN;
3264 tx_size += ISCSI_CRC_LEN; 3257 tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3329 if (conn->conn_ops->HeaderDigest) { 3322 if (conn->conn_ops->HeaderDigest) {
3330 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3323 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3331 3324
3332 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3325 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3333 (unsigned char *)hdr, ISCSI_HDR_LEN, 3326 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3334 0, NULL, (u8 *)header_digest);
3335 3327
3336 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3328 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3337 tx_size += ISCSI_CRC_LEN; 3329 tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
3504 if (conn->conn_ops->HeaderDigest) { 3496 if (conn->conn_ops->HeaderDigest) {
3505 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3497 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3506 3498
3507 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3499 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3508 (unsigned char *)hdr, ISCSI_HDR_LEN, 3500 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3509 0, NULL, (u8 *)header_digest);
3510 3501
3511 iov[0].iov_len += ISCSI_CRC_LEN; 3502 iov[0].iov_len += ISCSI_CRC_LEN;
3512 tx_size += ISCSI_CRC_LEN; 3503 tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
3557 struct iscsi_cmd *cmd, 3548 struct iscsi_cmd *cmd,
3558 struct iscsi_conn *conn) 3549 struct iscsi_conn *conn)
3559{ 3550{
3560 u32 iov_count = 0, tx_size = 0; 3551 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3561 struct iscsi_reject *hdr;
3562 struct kvec *iov; 3552 struct kvec *iov;
3553 u32 iov_count = 0, tx_size;
3563 3554
3564 iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); 3555 iscsit_build_reject(cmd, conn, hdr);
3565 3556
3566 iov = &cmd->iov_misc[0]; 3557 iov = &cmd->iov_misc[0];
3567 iov[iov_count].iov_base = cmd->pdu; 3558 iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
3574 if (conn->conn_ops->HeaderDigest) { 3565 if (conn->conn_ops->HeaderDigest) {
3575 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3566 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3576 3567
3577 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3568 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3578 (unsigned char *)hdr, ISCSI_HDR_LEN, 3569 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3579 0, NULL, (u8 *)header_digest);
3580 3570
3581 iov[0].iov_len += ISCSI_CRC_LEN; 3571 iov[0].iov_len += ISCSI_CRC_LEN;
3582 tx_size += ISCSI_CRC_LEN; 3572 tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
3585 } 3575 }
3586 3576
3587 if (conn->conn_ops->DataDigest) { 3577 if (conn->conn_ops->DataDigest) {
3588 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3578 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
3589 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3579 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
3590 0, NULL, (u8 *)&cmd->data_crc);
3591 3580
3592 iov[iov_count].iov_base = &cmd->data_crc; 3581 iov[iov_count].iov_base = &cmd->data_crc;
3593 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3582 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 7816af6cdd12..40d9dbca987b 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
823 /* 823 /*
824 * CmdSN is greater than the tail of the list. 824 * CmdSN is greater than the tail of the list.
825 */ 825 */
826 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) 826 if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
827 list_add_tail(&ooo_cmdsn->ooo_list, 827 list_add_tail(&ooo_cmdsn->ooo_list,
828 &sess->sess_ooo_cmdsn_list); 828 &sess->sess_ooo_cmdsn_list);
829 else { 829 else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
833 */ 833 */
834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
835 ooo_list) { 835 ooo_list) {
836 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 836 if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
837 continue; 837 continue;
838 838
839 /* Insert before this entry */
839 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
840 &ooo_tmp->ooo_list); 841 ooo_tmp->ooo_list.prev);
841 break; 842 break;
842 } 843 }
843 } 844 }
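iscsi_sna_lt() is RFC 1982 serial-number arithmetic, which is what makes this insertion sort wraparound-safe; below is an illustrative reimplementation (2^31 comparison window, as in the kernel's SNA32_CHECK), not the kernel's code verbatim. Note that 0xfffffffa compares as less than 5.

#include <stdio.h>
#include <stdint.h>

/* Serial-number "less than" with a 2^31 window (RFC 1982 style). */
static int sna_lt(uint32_t a, uint32_t b)
{
	return (a < b && b - a < 0x80000000U) ||
	       (a > b && a - b > 0x80000000U);
}

int main(void)
{
	/* CmdSNs just before and after a wrap compare in arrival order. */
	printf("%d\n", sna_lt(0xfffffffaU, 5));	/* 1: 0xfffffffa precedes 5 */
	printf("%d\n", sna_lt(5, 0xfffffffaU));	/* 0 */
	return 0;
}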
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index f690be9e5293..c2185fc31136 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
436 /* 436 /*
437 * Extra parameters for ISER from RFC-5046 437 * Extra parameters for ISER from RFC-5046
438 */ 438 */
439 param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, 439 param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, 440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY); 441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
442 if (!param) 442 if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
529 SET_PSTATE_NEGOTIATE(param); 529 SET_PSTATE_NEGOTIATE(param);
530 } else if (!strcmp(param->name, OFMARKINT)) { 530 } else if (!strcmp(param->name, OFMARKINT)) {
531 SET_PSTATE_NEGOTIATE(param); 531 SET_PSTATE_NEGOTIATE(param);
532 } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 532 } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
533 if (iser == true) 533 if (iser == true)
534 SET_PSTATE_NEGOTIATE(param); 534 SET_PSTATE_NEGOTIATE(param);
535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { 535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
580 param->state &= ~PSTATE_NEGOTIATE; 580 param->state &= ~PSTATE_NEGOTIATE;
581 else if (!strcmp(param->name, OFMARKINT)) 581 else if (!strcmp(param->name, OFMARKINT))
582 param->state &= ~PSTATE_NEGOTIATE; 582 param->state &= ~PSTATE_NEGOTIATE;
583 else if (!strcmp(param->name, RDMAEXTENTIONS)) 583 else if (!strcmp(param->name, RDMAEXTENSIONS))
584 param->state &= ~PSTATE_NEGOTIATE; 584 param->state &= ~PSTATE_NEGOTIATE;
585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) 585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
586 param->state &= ~PSTATE_NEGOTIATE; 586 param->state &= ~PSTATE_NEGOTIATE;
@@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters(
 			ops->SessionType = !strcmp(param->value, DISCOVERY);
 			pr_debug("SessionType: %s\n",
 				param->value);
-		} else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+		} else if (!strcmp(param->name, RDMAEXTENSIONS)) {
 			ops->RDMAExtensions = !strcmp(param->value, YES);
 			pr_debug("RDMAExtensions: %s\n",
 				param->value);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index f31b9c4b83f2..915b06798505 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
  */
-#define RDMAEXTENTIONS "RDMAExtensions"
+#define RDMAEXTENSIONS "RDMAExtensions"
 #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
 #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
 
@@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 /*
  * Initial values for iSER parameters following RFC-5046 Section 6
  */
-#define INITIAL_RDMAEXTENTIONS NO
+#define INITIAL_RDMAEXTENSIONS NO
 #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
 #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 43b7ac6c5b1c..4a8bd36d3958 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
 	.store = target_core_store_dev_udev_path,
 };
 
+static ssize_t target_core_show_dev_enable(void *p, char *page)
+{
+	struct se_device *dev = p;
+
+	return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
+}
+
 static ssize_t target_core_store_dev_enable(
 	void *p,
 	const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
 static struct target_core_configfs_attribute target_core_attr_dev_enable = {
 	.attr = { .ca_owner = THIS_MODULE,
 		  .ca_name = "enable",
-		  .ca_mode = S_IWUSR },
-	.show = NULL,
+		  .ca_mode = S_IRUGO | S_IWUSR },
+	.show = target_core_show_dev_enable,
 	.store = target_core_store_dev_enable,
 };
 
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 2e4d655471bc..4630481b6043 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		struct se_dev_entry *deve = se_cmd->se_deve;
 
 		deve->total_cmds++;
-		deve->total_bytes += se_cmd->data_length;
 
 		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
 		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
 			deve->read_bytes += se_cmd->data_length;
 
-		deve->deve_cmds++;
-
 		se_lun = deve->se_lun;
 		se_cmd->se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
 	return 0;
 }
 
-void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
-{
-	struct se_dev_entry *deve;
-	unsigned long flags;
-
-	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
-	deve->deve_cmds--;
-	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
-}
-
 void core_update_device_list_access(
 	u32 mapped_lun,
 	u32 lun_access,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 58ed683e04ae..1b1d544e927a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev)
 		struct request_queue *q = bdev_get_queue(inode->i_bdev);
 		unsigned long long dev_size;
 
-		dev->dev_attrib.hw_block_size =
-			bdev_logical_block_size(inode->i_bdev);
-		dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
-
 		/*
 		 * Determine the number of bytes from i_size_read() minus
 		 * one (1) logical sector from underlying struct block_device
@@ -203,9 +199,6 @@ static int fd_configure_device(struct se_device *dev)
 			goto fail;
 		}
 
-		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
-		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
-
 		/*
 		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
 		 */
@@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev)
 
 	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
 
+	dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
 
 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07f5f94634bb..aa1620abec6d 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
 				rw = WRITE_FUA;
 			else if (!(q->flush_flags & REQ_FLUSH))
 				rw = WRITE_FUA;
+			else
+				rw = WRITE;
 		} else {
 			rw = WRITE;
 		}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 853bab60e362..18d49df4d0ac 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
 int core_free_device_list_for_node(struct se_node_acl *,
 		struct se_portal_group *);
-void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
 void core_update_device_list_access(u32, u32, struct se_node_acl *);
 int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
 		u32, u32, struct se_node_acl *, struct se_portal_group *);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index e0b3c379aa14..0921a64b5550 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
 	u32 src_len;
 	u64 tmp;
 
+	if (dev->rd_flags & RDF_NULLIO) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
 	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
 	rd_offset = do_div(tmp, PAGE_SIZE);
 	rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
 }
 
 enum {
-	Opt_rd_pages, Opt_err
+	Opt_rd_pages, Opt_rd_nullio, Opt_err
 };
 
 static match_table_t tokens = {
 	{Opt_rd_pages, "rd_pages=%d"},
+	{Opt_rd_nullio, "rd_nullio=%d"},
 	{Opt_err, NULL}
 };
 
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
408 " Count: %u\n", rd_dev->rd_page_count); 414 " Count: %u\n", rd_dev->rd_page_count);
409 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 415 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
410 break; 416 break;
417 case Opt_rd_nullio:
418 match_int(args, &arg);
419 if (arg != 1)
420 break;
421
422 pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
423 rd_dev->rd_flags |= RDF_NULLIO;
424 break;
411 default: 425 default:
412 break; 426 break;
413 } 427 }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
 	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
 			rd_dev->rd_dev_id);
 	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
-			" SG_table_count: %u\n", rd_dev->rd_page_count,
-			PAGE_SIZE, rd_dev->sg_table_count);
+			" SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
+			PAGE_SIZE, rd_dev->sg_table_count,
+			!!(rd_dev->rd_flags & RDF_NULLIO));
 	return bl;
 }
 
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 933b38b6e563..1789d1e14395 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
 } ____cacheline_aligned;
 
 #define RDF_HAS_PAGE_COUNT	0x01
+#define RDF_NULLIO		0x02
 
 struct rd_dev {
 	struct se_device dev;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f8388b4024aa..4a793362309d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 	if (wait_for_tasks)
 		transport_wait_for_tasks(cmd);
 
-	core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
-
 	if (cmd->se_lun)
 		transport_lun_remove_cmd(cmd);
 
@@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
-	unsigned long flags;
 
-	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		spin_unlock(&se_sess->sess_cmd_lock);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+		spin_unlock(&se_sess->sess_cmd_lock);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+	spin_unlock(&se_sess->sess_cmd_lock);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
  */
 int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 {
-	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
+	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
+			&se_sess->sess_cmd_lock);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
 
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index bff0775e258c..5174ebac288d 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
  *
  * Since these may be in userspace, we use (inline) accessors.
  */
+#include <linux/module.h>
 #include <linux/vringh.h>
 #include <linux/virtio_ring.h>
 #include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
 	return __vringh_need_notify(vrh, getu16_kern);
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
+
+MODULE_LICENSE("GPL");