Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/glue.c | 55
-rw-r--r--  drivers/acpi/processor_core.c | 3
-rw-r--r--  drivers/acpi/processor_driver.c | 2
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/ata/libata-acpi.c | 7
-rw-r--r--  drivers/base/power/main.c | 2
-rw-r--r--  drivers/base/power/power.h | 8
-rw-r--r--  drivers/base/power/qos.c | 217
-rw-r--r--  drivers/base/power/sysfs.c | 1
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 1
-rw-r--r--  drivers/char/random.c | 12
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 2
-rw-r--r--  drivers/cpufreq/highbank-cpufreq.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 42
-rw-r--r--  drivers/firmware/dmi_scan.c | 5
-rw-r--r--  drivers/firmware/efivars.c | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_agp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 173
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 22
-rw-r--r--  drivers/hwmon/pmbus/ltc2978.c | 30
-rw-r--r--  drivers/hwmon/sht15.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 1
-rw-r--r--  drivers/input/keyboard/tc3589x-keypad.c | 8
-rw-r--r--  drivers/input/mouse/alps.c | 85
-rw-r--r--  drivers/input/mouse/alps.h | 1
-rw-r--r--  drivers/input/mouse/cypress_ps2.c | 19
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 4
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 7
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 68
-rw-r--r--  drivers/input/touchscreen/mms114.c | 34
-rw-r--r--  drivers/iommu/dmar.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_tty.c | 4
-rw-r--r--  drivers/mailbox/pl320-ipc.c | 3
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c | 1
-rw-r--r--  drivers/mtd/mtdchar.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 81
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 14
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 76
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c | 2
-rw-r--r--  drivers/net/hippi/rrunner.c | 3
-rw-r--r--  drivers/net/macvlan.c | 1
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vxlan.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c | 133
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 266
-rw-r--r--  drivers/oprofile/oprofilefs.c | 1
-rw-r--r--  drivers/pci/pci-acpi.c | 8
-rw-r--r--  drivers/platform/x86/chromeos_laptop.c | 41
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 8
-rw-r--r--  drivers/regulator/core.c | 12
-rw-r--r--  drivers/regulator/db8500-prcmu.c | 4
-rw-r--r--  drivers/regulator/palmas-regulator.c | 3
-rw-r--r--  drivers/regulator/twl-regulator.c | 9
-rw-r--r--  drivers/scsi/scsi_lib.c | 7
-rw-r--r--  drivers/staging/ccg/f_fs.c | 1
-rw-r--r--  drivers/usb/core/usb-acpi.c | 9
-rw-r--r--  drivers/usb/gadget/f_fs.c | 1
-rw-r--r--  drivers/usb/gadget/inode.c | 1
-rw-r--r--  drivers/xen/xenfs/super.c | 1
127 files changed, 1445 insertions, 1002 deletions
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index ef6f155469b5..40a84cc6740c 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,12 +36,11 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
36{ 36{
37 if (acpi_disabled) 37 if (acpi_disabled)
38 return -ENODEV; 38 return -ENODEV;
39 if (type && type->bus && type->find_device) { 39 if (type && type->match && type->find_device) {
40 down_write(&bus_type_sem); 40 down_write(&bus_type_sem);
41 list_add_tail(&type->list, &bus_type_list); 41 list_add_tail(&type->list, &bus_type_list);
42 up_write(&bus_type_sem); 42 up_write(&bus_type_sem);
43 printk(KERN_INFO PREFIX "bus type %s registered\n", 43 printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
44 type->bus->name);
45 return 0; 44 return 0;
46 } 45 }
47 return -ENODEV; 46 return -ENODEV;
@@ -56,24 +55,21 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
56 down_write(&bus_type_sem); 55 down_write(&bus_type_sem);
57 list_del_init(&type->list); 56 list_del_init(&type->list);
58 up_write(&bus_type_sem); 57 up_write(&bus_type_sem);
59 printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n", 58 printk(KERN_INFO PREFIX "bus type %s unregistered\n",
60 type->bus->name); 59 type->name);
61 return 0; 60 return 0;
62 } 61 }
63 return -ENODEV; 62 return -ENODEV;
64} 63}
65EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); 64EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
66 65
67static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type) 66static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
68{ 67{
69 struct acpi_bus_type *tmp, *ret = NULL; 68 struct acpi_bus_type *tmp, *ret = NULL;
70 69
71 if (!type)
72 return NULL;
73
74 down_read(&bus_type_sem); 70 down_read(&bus_type_sem);
75 list_for_each_entry(tmp, &bus_type_list, list) { 71 list_for_each_entry(tmp, &bus_type_list, list) {
76 if (tmp->bus == type) { 72 if (tmp->match(dev)) {
77 ret = tmp; 73 ret = tmp;
78 break; 74 break;
79 } 75 }
@@ -82,22 +78,6 @@ static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
82 return ret; 78 return ret;
83} 79}
84 80
85static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86{
87 struct acpi_bus_type *tmp;
88 int ret = -ENODEV;
89
90 down_read(&bus_type_sem);
91 list_for_each_entry(tmp, &bus_type_list, list) {
92 if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
93 ret = 0;
94 break;
95 }
96 }
97 up_read(&bus_type_sem);
98 return ret;
99}
100
101static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, 81static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
102 void *addr_p, void **ret_p) 82 void *addr_p, void **ret_p)
103{ 83{
@@ -261,29 +241,12 @@ err:
261 241
262static int acpi_platform_notify(struct device *dev) 242static int acpi_platform_notify(struct device *dev)
263{ 243{
264 struct acpi_bus_type *type; 244 struct acpi_bus_type *type = acpi_get_bus_type(dev);
265 acpi_handle handle; 245 acpi_handle handle;
266 int ret; 246 int ret;
267 247
268 ret = acpi_bind_one(dev, NULL); 248 ret = acpi_bind_one(dev, NULL);
269 if (ret && (!dev->bus || !dev->parent)) { 249 if (ret && type) {
270 /* bridge devices genernally haven't bus or parent */
271 ret = acpi_find_bridge_device(dev, &handle);
272 if (!ret) {
273 ret = acpi_bind_one(dev, handle);
274 if (ret)
275 goto out;
276 }
277 }
278
279 type = acpi_get_bus_type(dev->bus);
280 if (ret) {
281 if (!type || !type->find_device) {
282 DBG("No ACPI bus support for %s\n", dev_name(dev));
283 ret = -EINVAL;
284 goto out;
285 }
286
287 ret = type->find_device(dev, &handle); 250 ret = type->find_device(dev, &handle);
288 if (ret) { 251 if (ret) {
289 DBG("Unable to get handle for %s\n", dev_name(dev)); 252 DBG("Unable to get handle for %s\n", dev_name(dev));
@@ -316,7 +279,7 @@ static int acpi_platform_notify_remove(struct device *dev)
316{ 279{
317 struct acpi_bus_type *type; 280 struct acpi_bus_type *type;
318 281
319 type = acpi_get_bus_type(dev->bus); 282 type = acpi_get_bus_type(dev);
320 if (type && type->cleanup) 283 if (type && type->cleanup)
321 type->cleanup(dev); 284 type->cleanup(dev);
322 285
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index eff722278ff5..164d49569aeb 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -158,8 +158,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
158 } 158 }
159 159
160exit: 160exit:
161 if (buffer.pointer) 161 kfree(buffer.pointer);
162 kfree(buffer.pointer);
163 return apic_id; 162 return apic_id;
164} 163}
165 164
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index df34bd04ae62..bec717ffd25f 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -559,7 +559,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
559 return 0; 559 return 0;
560#endif 560#endif
561 561
562 BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); 562 BUG_ON(pr->id >= nr_cpu_ids);
563 563
564 /* 564 /*
565 * Buggy BIOS check 565 * Buggy BIOS check
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d3a06a629a1..24213033fbae 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -599,7 +599,6 @@ static void acpi_sleep_suspend_setup(void)
599 status = acpi_get_sleep_type_data(i, &type_a, &type_b); 599 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
600 if (ACPI_SUCCESS(status)) { 600 if (ACPI_SUCCESS(status)) {
601 sleep_states[i] = 1; 601 sleep_states[i] = 1;
602 pr_cont(" S%d", i);
603 } 602 }
604 } 603 }
605 604
@@ -742,7 +741,6 @@ static void acpi_sleep_hibernate_setup(void)
742 hibernation_set_ops(old_suspend_ordering ? 741 hibernation_set_ops(old_suspend_ordering ?
743 &acpi_hibernation_ops_old : &acpi_hibernation_ops); 742 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
744 sleep_states[ACPI_STATE_S4] = 1; 743 sleep_states[ACPI_STATE_S4] = 1;
745 pr_cont(KERN_CONT " S4");
746 if (nosigcheck) 744 if (nosigcheck)
747 return; 745 return;
748 746
@@ -788,6 +786,9 @@ int __init acpi_sleep_init(void)
788{ 786{
789 acpi_status status; 787 acpi_status status;
790 u8 type_a, type_b; 788 u8 type_a, type_b;
789 char supported[ACPI_S_STATE_COUNT * 3 + 1];
790 char *pos = supported;
791 int i;
791 792
792 if (acpi_disabled) 793 if (acpi_disabled)
793 return 0; 794 return 0;
@@ -795,7 +796,6 @@ int __init acpi_sleep_init(void)
795 acpi_sleep_dmi_check(); 796 acpi_sleep_dmi_check();
796 797
797 sleep_states[ACPI_STATE_S0] = 1; 798 sleep_states[ACPI_STATE_S0] = 1;
798 pr_info(PREFIX "(supports S0");
799 799
800 acpi_sleep_suspend_setup(); 800 acpi_sleep_suspend_setup();
801 acpi_sleep_hibernate_setup(); 801 acpi_sleep_hibernate_setup();
@@ -803,11 +803,17 @@ int __init acpi_sleep_init(void)
803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); 803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
804 if (ACPI_SUCCESS(status)) { 804 if (ACPI_SUCCESS(status)) {
805 sleep_states[ACPI_STATE_S5] = 1; 805 sleep_states[ACPI_STATE_S5] = 1;
806 pr_cont(" S5");
807 pm_power_off_prepare = acpi_power_off_prepare; 806 pm_power_off_prepare = acpi_power_off_prepare;
808 pm_power_off = acpi_power_off; 807 pm_power_off = acpi_power_off;
809 } 808 }
810 pr_cont(")\n"); 809
810 supported[0] = 0;
811 for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
812 if (sleep_states[i])
813 pos += sprintf(pos, " S%d", i);
814 }
815 pr_info(PREFIX "(supports%s)\n", supported);
816
811 /* 817 /*
812 * Register the tts_notifier to reboot notifier list so that the _TTS 818 * Register the tts_notifier to reboot notifier list so that the _TTS
813 * object can also be evaluated when the system enters S5. 819 * object can also be evaluated when the system enters S5.
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 0ea1018280bd..beea3115577e 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -1144,13 +1144,8 @@ static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
1144 return -ENODEV; 1144 return -ENODEV;
1145} 1145}
1146 1146
1147static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
1148{
1149 return -ENODEV;
1150}
1151
1152static struct acpi_bus_type ata_acpi_bus = { 1147static struct acpi_bus_type ata_acpi_bus = {
1153 .find_bridge = ata_acpi_find_dummy, 1148 .name = "ATA",
1154 .find_device = ata_acpi_find_device, 1149 .find_device = ata_acpi_find_device,
1155}; 1150};
1156 1151
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
99 dev_warn(dev, "parent %s should not be sleeping\n", 99 dev_warn(dev, "parent %s should not be sleeping\n",
100 dev_name(dev->parent)); 100 dev_name(dev->parent));
101 list_add_tail(&dev->power.entry, &dpm_list); 101 list_add_tail(&dev->power.entry, &dpm_list);
102 dev_pm_qos_constraints_init(dev);
103 mutex_unlock(&dpm_list_mtx); 102 mutex_unlock(&dpm_list_mtx);
104} 103}
105 104
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
113 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 112 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
114 complete_all(&dev->power.completion); 113 complete_all(&dev->power.completion);
115 mutex_lock(&dpm_list_mtx); 114 mutex_lock(&dpm_list_mtx);
116 dev_pm_qos_constraints_destroy(dev);
117 list_del_init(&dev->power.entry); 115 list_del_init(&dev->power.entry);
118 mutex_unlock(&dpm_list_mtx); 116 mutex_unlock(&dpm_list_mtx);
119 device_wakeup_disable(dev); 117 device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
4{ 4{
5 if (!dev->power.early_init) { 5 if (!dev->power.early_init) {
6 spin_lock_init(&dev->power.lock); 6 spin_lock_init(&dev->power.lock);
7 dev->power.power_state = PMSG_INVALID; 7 dev->power.qos = NULL;
8 dev->power.early_init = true; 8 dev->power.early_init = true;
9 } 9 }
10} 10}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
56 56
57static inline void device_pm_sleep_init(struct device *dev) {} 57static inline void device_pm_sleep_init(struct device *dev) {}
58 58
59static inline void device_pm_add(struct device *dev) 59static inline void device_pm_add(struct device *dev) {}
60{
61 dev_pm_qos_constraints_init(dev);
62}
63 60
64static inline void device_pm_remove(struct device *dev) 61static inline void device_pm_remove(struct device *dev)
65{ 62{
66 dev_pm_qos_constraints_destroy(dev);
67 pm_runtime_remove(dev); 63 pm_runtime_remove(dev);
68} 64}
69 65
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/export.h> 42#include <linux/export.h>
43#include <linux/pm_runtime.h> 43#include <linux/pm_runtime.h>
44#include <linux/err.h>
44 45
45#include "power.h" 46#include "power.h"
46 47
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
61 struct pm_qos_flags *pqf; 62 struct pm_qos_flags *pqf;
62 s32 val; 63 s32 val;
63 64
64 if (!qos) 65 if (IS_ERR_OR_NULL(qos))
65 return PM_QOS_FLAGS_UNDEFINED; 66 return PM_QOS_FLAGS_UNDEFINED;
66 67
67 pqf = &qos->flags; 68 pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
101 */ 102 */
102s32 __dev_pm_qos_read_value(struct device *dev) 103s32 __dev_pm_qos_read_value(struct device *dev)
103{ 104{
104 return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; 105 return IS_ERR_OR_NULL(dev->power.qos) ?
106 0 : pm_qos_read_value(&dev->power.qos->latency);
105} 107}
106 108
107/** 109/**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
198 return 0; 200 return 0;
199} 201}
200 202
201/** 203static void __dev_pm_qos_hide_latency_limit(struct device *dev);
202 * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. 204static void __dev_pm_qos_hide_flags(struct device *dev);
203 * @dev: target device
204 *
205 * Called from the device PM subsystem during device insertion under
206 * device_pm_lock().
207 */
208void dev_pm_qos_constraints_init(struct device *dev)
209{
210 mutex_lock(&dev_pm_qos_mtx);
211 dev->power.qos = NULL;
212 dev->power.power_state = PMSG_ON;
213 mutex_unlock(&dev_pm_qos_mtx);
214}
215 205
216/** 206/**
217 * dev_pm_qos_constraints_destroy 207 * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
226 struct pm_qos_constraints *c; 216 struct pm_qos_constraints *c;
227 struct pm_qos_flags *f; 217 struct pm_qos_flags *f;
228 218
219 mutex_lock(&dev_pm_qos_mtx);
220
229 /* 221 /*
230 * If the device's PM QoS resume latency limit or PM QoS flags have been 222 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 * exposed to user space, they have to be hidden at this point. 223 * exposed to user space, they have to be hidden at this point.
232 */ 224 */
233 dev_pm_qos_hide_latency_limit(dev); 225 __dev_pm_qos_hide_latency_limit(dev);
234 dev_pm_qos_hide_flags(dev); 226 __dev_pm_qos_hide_flags(dev);
235 227
236 mutex_lock(&dev_pm_qos_mtx);
237
238 dev->power.power_state = PMSG_INVALID;
239 qos = dev->power.qos; 228 qos = dev->power.qos;
240 if (!qos) 229 if (!qos)
241 goto out; 230 goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
257 } 246 }
258 247
259 spin_lock_irq(&dev->power.lock); 248 spin_lock_irq(&dev->power.lock);
260 dev->power.qos = NULL; 249 dev->power.qos = ERR_PTR(-ENODEV);
261 spin_unlock_irq(&dev->power.lock); 250 spin_unlock_irq(&dev->power.lock);
262 251
263 kfree(c->notifiers); 252 kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
301 "%s() called for already added request\n", __func__)) 290 "%s() called for already added request\n", __func__))
302 return -EINVAL; 291 return -EINVAL;
303 292
304 req->dev = dev;
305
306 mutex_lock(&dev_pm_qos_mtx); 293 mutex_lock(&dev_pm_qos_mtx);
307 294
308 if (!dev->power.qos) { 295 if (IS_ERR(dev->power.qos))
309 if (dev->power.power_state.event == PM_EVENT_INVALID) { 296 ret = -ENODEV;
310 /* The device has been removed from the system. */ 297 else if (!dev->power.qos)
311 req->dev = NULL; 298 ret = dev_pm_qos_constraints_allocate(dev);
312 ret = -ENODEV;
313 goto out;
314 } else {
315 /*
316 * Allocate the constraints data on the first call to
317 * add_request, i.e. only if the data is not already
318 * allocated and if the device has not been removed.
319 */
320 ret = dev_pm_qos_constraints_allocate(dev);
321 }
322 }
323 299
324 if (!ret) { 300 if (!ret) {
301 req->dev = dev;
325 req->type = type; 302 req->type = type;
326 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 303 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
327 } 304 }
328 305
329 out:
330 mutex_unlock(&dev_pm_qos_mtx); 306 mutex_unlock(&dev_pm_qos_mtx);
331 307
332 return ret; 308 return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
344 s32 curr_value; 320 s32 curr_value;
345 int ret = 0; 321 int ret = 0;
346 322
347 if (!req->dev->power.qos) 323 if (!req) /*guard against callers passing in null */
324 return -EINVAL;
325
326 if (WARN(!dev_pm_qos_request_active(req),
327 "%s() called for unknown object\n", __func__))
328 return -EINVAL;
329
330 if (IS_ERR_OR_NULL(req->dev->power.qos))
348 return -ENODEV; 331 return -ENODEV;
349 332
350 switch(req->type) { 333 switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
386{ 369{
387 int ret; 370 int ret;
388 371
372 mutex_lock(&dev_pm_qos_mtx);
373 ret = __dev_pm_qos_update_request(req, new_value);
374 mutex_unlock(&dev_pm_qos_mtx);
375 return ret;
376}
377EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
378
379static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
380{
381 int ret;
382
389 if (!req) /*guard against callers passing in null */ 383 if (!req) /*guard against callers passing in null */
390 return -EINVAL; 384 return -EINVAL;
391 385
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
393 "%s() called for unknown object\n", __func__)) 387 "%s() called for unknown object\n", __func__))
394 return -EINVAL; 388 return -EINVAL;
395 389
396 mutex_lock(&dev_pm_qos_mtx); 390 if (IS_ERR_OR_NULL(req->dev->power.qos))
397 ret = __dev_pm_qos_update_request(req, new_value); 391 return -ENODEV;
398 mutex_unlock(&dev_pm_qos_mtx);
399 392
393 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
394 memset(req, 0, sizeof(*req));
400 return ret; 395 return ret;
401} 396}
402EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
403 397
404/** 398/**
405 * dev_pm_qos_remove_request - modifies an existing qos request 399 * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
418 */ 412 */
419int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) 413int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
420{ 414{
421 int ret = 0; 415 int ret;
422
423 if (!req) /*guard against callers passing in null */
424 return -EINVAL;
425
426 if (WARN(!dev_pm_qos_request_active(req),
427 "%s() called for unknown object\n", __func__))
428 return -EINVAL;
429 416
430 mutex_lock(&dev_pm_qos_mtx); 417 mutex_lock(&dev_pm_qos_mtx);
431 418 ret = __dev_pm_qos_remove_request(req);
432 if (req->dev->power.qos) {
433 ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
434 PM_QOS_DEFAULT_VALUE);
435 memset(req, 0, sizeof(*req));
436 } else {
437 /* Return if the device has been removed */
438 ret = -ENODEV;
439 }
440
441 mutex_unlock(&dev_pm_qos_mtx); 419 mutex_unlock(&dev_pm_qos_mtx);
442 return ret; 420 return ret;
443} 421}
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
462 440
463 mutex_lock(&dev_pm_qos_mtx); 441 mutex_lock(&dev_pm_qos_mtx);
464 442
465 if (!dev->power.qos) 443 if (IS_ERR(dev->power.qos))
466 ret = dev->power.power_state.event != PM_EVENT_INVALID ? 444 ret = -ENODEV;
467 dev_pm_qos_constraints_allocate(dev) : -ENODEV; 445 else if (!dev->power.qos)
446 ret = dev_pm_qos_constraints_allocate(dev);
468 447
469 if (!ret) 448 if (!ret)
470 ret = blocking_notifier_chain_register( 449 ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
493 mutex_lock(&dev_pm_qos_mtx); 472 mutex_lock(&dev_pm_qos_mtx);
494 473
495 /* Silently return if the constraints object is not present. */ 474 /* Silently return if the constraints object is not present. */
496 if (dev->power.qos) 475 if (!IS_ERR_OR_NULL(dev->power.qos))
497 retval = blocking_notifier_chain_unregister( 476 retval = blocking_notifier_chain_unregister(
498 dev->power.qos->latency.notifiers, 477 dev->power.qos->latency.notifiers,
499 notifier); 478 notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
563static void __dev_pm_qos_drop_user_request(struct device *dev, 542static void __dev_pm_qos_drop_user_request(struct device *dev,
564 enum dev_pm_qos_req_type type) 543 enum dev_pm_qos_req_type type)
565{ 544{
545 struct dev_pm_qos_request *req = NULL;
546
566 switch(type) { 547 switch(type) {
567 case DEV_PM_QOS_LATENCY: 548 case DEV_PM_QOS_LATENCY:
568 dev_pm_qos_remove_request(dev->power.qos->latency_req); 549 req = dev->power.qos->latency_req;
569 dev->power.qos->latency_req = NULL; 550 dev->power.qos->latency_req = NULL;
570 break; 551 break;
571 case DEV_PM_QOS_FLAGS: 552 case DEV_PM_QOS_FLAGS:
572 dev_pm_qos_remove_request(dev->power.qos->flags_req); 553 req = dev->power.qos->flags_req;
573 dev->power.qos->flags_req = NULL; 554 dev->power.qos->flags_req = NULL;
574 break; 555 break;
575 } 556 }
557 __dev_pm_qos_remove_request(req);
558 kfree(req);
576} 559}
577 560
578/** 561/**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
588 if (!device_is_registered(dev) || value < 0) 571 if (!device_is_registered(dev) || value < 0)
589 return -EINVAL; 572 return -EINVAL;
590 573
591 if (dev->power.qos && dev->power.qos->latency_req)
592 return -EEXIST;
593
594 req = kzalloc(sizeof(*req), GFP_KERNEL); 574 req = kzalloc(sizeof(*req), GFP_KERNEL);
595 if (!req) 575 if (!req)
596 return -ENOMEM; 576 return -ENOMEM;
597 577
598 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); 578 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
599 if (ret < 0) 579 if (ret < 0) {
580 kfree(req);
600 return ret; 581 return ret;
582 }
583
584 mutex_lock(&dev_pm_qos_mtx);
585
586 if (IS_ERR_OR_NULL(dev->power.qos))
587 ret = -ENODEV;
588 else if (dev->power.qos->latency_req)
589 ret = -EEXIST;
590
591 if (ret < 0) {
592 __dev_pm_qos_remove_request(req);
593 kfree(req);
594 goto out;
595 }
601 596
602 dev->power.qos->latency_req = req; 597 dev->power.qos->latency_req = req;
603 ret = pm_qos_sysfs_add_latency(dev); 598 ret = pm_qos_sysfs_add_latency(dev);
604 if (ret) 599 if (ret)
605 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 600 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
606 601
602 out:
603 mutex_unlock(&dev_pm_qos_mtx);
607 return ret; 604 return ret;
608} 605}
609EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); 606EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
610 607
608static void __dev_pm_qos_hide_latency_limit(struct device *dev)
609{
610 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
611 pm_qos_sysfs_remove_latency(dev);
612 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
613 }
614}
615
611/** 616/**
612 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. 617 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
613 * @dev: Device whose PM QoS latency limit is to be hidden from user space. 618 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
614 */ 619 */
615void dev_pm_qos_hide_latency_limit(struct device *dev) 620void dev_pm_qos_hide_latency_limit(struct device *dev)
616{ 621{
617 if (dev->power.qos && dev->power.qos->latency_req) { 622 mutex_lock(&dev_pm_qos_mtx);
618 pm_qos_sysfs_remove_latency(dev); 623 __dev_pm_qos_hide_latency_limit(dev);
619 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 624 mutex_unlock(&dev_pm_qos_mtx);
620 }
621} 625}
622EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 626EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
623 627
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
634 if (!device_is_registered(dev)) 638 if (!device_is_registered(dev))
635 return -EINVAL; 639 return -EINVAL;
636 640
637 if (dev->power.qos && dev->power.qos->flags_req)
638 return -EEXIST;
639
640 req = kzalloc(sizeof(*req), GFP_KERNEL); 641 req = kzalloc(sizeof(*req), GFP_KERNEL);
641 if (!req) 642 if (!req)
642 return -ENOMEM; 643 return -ENOMEM;
643 644
644 pm_runtime_get_sync(dev);
645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); 645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
646 if (ret < 0) 646 if (ret < 0) {
647 goto fail; 647 kfree(req);
648 return ret;
649 }
650
651 pm_runtime_get_sync(dev);
652 mutex_lock(&dev_pm_qos_mtx);
653
654 if (IS_ERR_OR_NULL(dev->power.qos))
655 ret = -ENODEV;
656 else if (dev->power.qos->flags_req)
657 ret = -EEXIST;
658
659 if (ret < 0) {
660 __dev_pm_qos_remove_request(req);
661 kfree(req);
662 goto out;
663 }
648 664
649 dev->power.qos->flags_req = req; 665 dev->power.qos->flags_req = req;
650 ret = pm_qos_sysfs_add_flags(dev); 666 ret = pm_qos_sysfs_add_flags(dev);
651 if (ret) 667 if (ret)
652 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
653 669
654fail: 670 out:
671 mutex_unlock(&dev_pm_qos_mtx);
655 pm_runtime_put(dev); 672 pm_runtime_put(dev);
656 return ret; 673 return ret;
657} 674}
658EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); 675EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
659 676
677static void __dev_pm_qos_hide_flags(struct device *dev)
678{
679 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
680 pm_qos_sysfs_remove_flags(dev);
681 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
682 }
683}
684
660/** 685/**
661 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. 686 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
662 * @dev: Device whose PM QoS flags are to be hidden from user space. 687 * @dev: Device whose PM QoS flags are to be hidden from user space.
663 */ 688 */
664void dev_pm_qos_hide_flags(struct device *dev) 689void dev_pm_qos_hide_flags(struct device *dev)
665{ 690{
666 if (dev->power.qos && dev->power.qos->flags_req) { 691 pm_runtime_get_sync(dev);
667 pm_qos_sysfs_remove_flags(dev); 692 mutex_lock(&dev_pm_qos_mtx);
668 pm_runtime_get_sync(dev); 693 __dev_pm_qos_hide_flags(dev);
669 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 694 mutex_unlock(&dev_pm_qos_mtx);
670 pm_runtime_put(dev); 695 pm_runtime_put(dev);
671 }
672} 696}
673EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 697EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
674 698
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
683 s32 value; 707 s32 value;
684 int ret; 708 int ret;
685 709
686 if (!dev->power.qos || !dev->power.qos->flags_req)
687 return -EINVAL;
688
689 pm_runtime_get_sync(dev); 710 pm_runtime_get_sync(dev);
690 mutex_lock(&dev_pm_qos_mtx); 711 mutex_lock(&dev_pm_qos_mtx);
691 712
713 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
714 ret = -EINVAL;
715 goto out;
716 }
717
692 value = dev_pm_qos_requested_flags(dev); 718 value = dev_pm_qos_requested_flags(dev);
693 if (set) 719 if (set)
694 value |= mask; 720 value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
697 723
698 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); 724 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
699 725
726 out:
700 mutex_unlock(&dev_pm_qos_mtx); 727 mutex_unlock(&dev_pm_qos_mtx);
701 pm_runtime_put(dev); 728 pm_runtime_put(dev);
702
703 return ret; 729 return ret;
704} 730}
731#else /* !CONFIG_PM_RUNTIME */
732static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
733static void __dev_pm_qos_hide_flags(struct device *dev) {}
705#endif /* CONFIG_PM_RUNTIME */ 734#endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
708 708
709void dpm_sysfs_remove(struct device *dev) 709void dpm_sysfs_remove(struct device *dev)
710{ 710{
711 dev_pm_qos_constraints_destroy(dev);
711 rpm_sysfs_remove(dev); 712 rpm_sysfs_remove(dev);
712 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 713 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
713 sysfs_remove_group(&dev->kobj, &pm_attr_group); 714 sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc6..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
184 if (ret < 0) { 184 if (ret < 0) {
185 dev_err(map->dev, "IRQ thread failed to resume: %d\n", 185 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
186 ret); 186 ret);
187 pm_runtime_put(map->dev);
187 return IRQ_NONE; 188 return IRQ_NONE;
188 } 189 }
189 } 190 }
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 594bda9dcfc8..32a6c5764950 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -852,6 +852,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
852 int reserved) 852 int reserved)
853{ 853{
854 unsigned long flags; 854 unsigned long flags;
855 int wakeup_write = 0;
855 856
856 /* Hold lock while accounting */ 857 /* Hold lock while accounting */
857 spin_lock_irqsave(&r->lock, flags); 858 spin_lock_irqsave(&r->lock, flags);
@@ -873,10 +874,8 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
873 else 874 else
874 r->entropy_count = reserved; 875 r->entropy_count = reserved;
875 876
876 if (r->entropy_count < random_write_wakeup_thresh) { 877 if (r->entropy_count < random_write_wakeup_thresh)
877 wake_up_interruptible(&random_write_wait); 878 wakeup_write = 1;
878 kill_fasync(&fasync, SIGIO, POLL_OUT);
879 }
880 } 879 }
881 880
882 DEBUG_ENT("debiting %zu entropy credits from %s%s\n", 881 DEBUG_ENT("debiting %zu entropy credits from %s%s\n",
@@ -884,6 +883,11 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
884 883
885 spin_unlock_irqrestore(&r->lock, flags); 884 spin_unlock_irqrestore(&r->lock, flags);
886 885
886 if (wakeup_write) {
887 wake_up_interruptible(&random_write_wait);
888 kill_fasync(&fasync, SIGIO, POLL_OUT);
889 }
890
887 return nbytes; 891 return nbytes;
888} 892}
889 893
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index d2ac91150600..46bde01eee62 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -64,7 +64,7 @@ static void *get_cpu_dbs_info_s(int cpu) \
64 * dbs: used as a shortform for demand based switching It helps to keep variable 64 * dbs: used as a shortform for demand based switching It helps to keep variable
65 * names smaller, simpler 65 * names smaller, simpler
66 * cdbs: common dbs 66 * cdbs: common dbs
67 * on_*: On-demand governor 67 * od_*: On-demand governor
68 * cs_*: Conservative governor 68 * cs_*: Conservative governor
69 */ 69 */
70 70
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index 66e3a71b81a3..b61b5a3fad64 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -28,13 +28,7 @@
28 28
29static int hb_voltage_change(unsigned int freq) 29static int hb_voltage_change(unsigned int freq)
30{ 30{
31 int i; 31 u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
32 u32 msg[HB_CPUFREQ_IPC_LEN];
33
34 msg[0] = HB_CPUFREQ_CHANGE_NOTE;
35 msg[1] = freq / 1000000;
36 for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
37 msg[i] = 0;
38 32
39 return pl320_ipc_transmit(msg); 33 return pl320_ipc_transmit(msg);
40} 34}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 096fde0ebcb5..f6dd1e761129 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -662,6 +662,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
662 662
663 cpu = all_cpu_data[policy->cpu]; 663 cpu = all_cpu_data[policy->cpu];
664 664
665 if (!policy->cpuinfo.max_freq)
666 return -ENODEV;
667
665 intel_pstate_get_min_max(cpu, &min, &max); 668 intel_pstate_get_min_max(cpu, &min, &max);
666 669
667 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 670 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -747,37 +750,11 @@ static struct cpufreq_driver intel_pstate_driver = {
747 .owner = THIS_MODULE, 750 .owner = THIS_MODULE,
748}; 751};
749 752
750static void intel_pstate_exit(void)
751{
752 int cpu;
753
754 sysfs_remove_group(intel_pstate_kobject,
755 &intel_pstate_attr_group);
756 debugfs_remove_recursive(debugfs_parent);
757
758 cpufreq_unregister_driver(&intel_pstate_driver);
759
760 if (!all_cpu_data)
761 return;
762
763 get_online_cpus();
764 for_each_online_cpu(cpu) {
765 if (all_cpu_data[cpu]) {
766 del_timer_sync(&all_cpu_data[cpu]->timer);
767 kfree(all_cpu_data[cpu]);
768 }
769 }
770
771 put_online_cpus();
772 vfree(all_cpu_data);
773}
774module_exit(intel_pstate_exit);
775
776static int __initdata no_load; 753static int __initdata no_load;
777 754
778static int __init intel_pstate_init(void) 755static int __init intel_pstate_init(void)
779{ 756{
780 int rc = 0; 757 int cpu, rc = 0;
781 const struct x86_cpu_id *id; 758 const struct x86_cpu_id *id;
782 759
783 if (no_load) 760 if (no_load)
@@ -802,7 +779,16 @@ static int __init intel_pstate_init(void)
802 intel_pstate_sysfs_expose_params(); 779 intel_pstate_sysfs_expose_params();
803 return rc; 780 return rc;
804out: 781out:
805 intel_pstate_exit(); 782 get_online_cpus();
783 for_each_online_cpu(cpu) {
784 if (all_cpu_data[cpu]) {
785 del_timer_sync(&all_cpu_data[cpu]->timer);
786 kfree(all_cpu_data[cpu]);
787 }
788 }
789
790 put_online_cpus();
791 vfree(all_cpu_data);
806 return -ENODEV; 792 return -ENODEV;
807} 793}
808device_initcall(intel_pstate_init); 794device_initcall(intel_pstate_init);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 982f1f5f5742..4cd392dbf115 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -442,7 +442,6 @@ static int __init dmi_present(const char __iomem *p)
442static int __init smbios_present(const char __iomem *p) 442static int __init smbios_present(const char __iomem *p)
443{ 443{
444 u8 buf[32]; 444 u8 buf[32];
445 int offset = 0;
446 445
447 memcpy_fromio(buf, p, 32); 446 memcpy_fromio(buf, p, 32);
448 if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) { 447 if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
@@ -461,9 +460,9 @@ static int __init smbios_present(const char __iomem *p)
461 dmi_ver = 0x0206; 460 dmi_ver = 0x0206;
462 break; 461 break;
463 } 462 }
464 offset = 16; 463 return memcmp(p + 16, "_DMI_", 5) || dmi_present(p + 16);
465 } 464 }
466 return dmi_present(buf + offset); 465 return 1;
467} 466}
468 467
469void __init dmi_scan_machine(void) 468void __init dmi_scan_machine(void)
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 7320bf891706..fe62aa392239 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -426,6 +426,44 @@ get_var_data(struct efivars *efivars, struct efi_variable *var)
426 return status; 426 return status;
427} 427}
428 428
429static efi_status_t
430check_var_size_locked(struct efivars *efivars, u32 attributes,
431 unsigned long size)
432{
433 u64 storage_size, remaining_size, max_size;
434 efi_status_t status;
435 const struct efivar_operations *fops = efivars->ops;
436
437 if (!efivars->ops->query_variable_info)
438 return EFI_UNSUPPORTED;
439
440 status = fops->query_variable_info(attributes, &storage_size,
441 &remaining_size, &max_size);
442
443 if (status != EFI_SUCCESS)
444 return status;
445
446 if (!storage_size || size > remaining_size || size > max_size ||
447 (remaining_size - size) < (storage_size / 2))
448 return EFI_OUT_OF_RESOURCES;
449
450 return status;
451}
452
453
454static efi_status_t
455check_var_size(struct efivars *efivars, u32 attributes, unsigned long size)
456{
457 efi_status_t status;
458 unsigned long flags;
459
460 spin_lock_irqsave(&efivars->lock, flags);
461 status = check_var_size_locked(efivars, attributes, size);
462 spin_unlock_irqrestore(&efivars->lock, flags);
463
464 return status;
465}
466
429static ssize_t 467static ssize_t
430efivar_guid_read(struct efivar_entry *entry, char *buf) 468efivar_guid_read(struct efivar_entry *entry, char *buf)
431{ 469{
@@ -547,11 +585,16 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
547 } 585 }
548 586
549 spin_lock_irq(&efivars->lock); 587 spin_lock_irq(&efivars->lock);
550 status = efivars->ops->set_variable(new_var->VariableName, 588
551 &new_var->VendorGuid, 589 status = check_var_size_locked(efivars, new_var->Attributes,
552 new_var->Attributes, 590 new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
553 new_var->DataSize, 591
554 new_var->Data); 592 if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
593 status = efivars->ops->set_variable(new_var->VariableName,
594 &new_var->VendorGuid,
595 new_var->Attributes,
596 new_var->DataSize,
597 new_var->Data);
555 598
556 spin_unlock_irq(&efivars->lock); 599 spin_unlock_irq(&efivars->lock);
557 600
@@ -702,8 +745,7 @@ static ssize_t efivarfs_file_write(struct file *file,
702 u32 attributes; 745 u32 attributes;
703 struct inode *inode = file->f_mapping->host; 746 struct inode *inode = file->f_mapping->host;
704 unsigned long datasize = count - sizeof(attributes); 747 unsigned long datasize = count - sizeof(attributes);
705 unsigned long newdatasize; 748 unsigned long newdatasize, varsize;
706 u64 storage_size, remaining_size, max_size;
707 ssize_t bytes = 0; 749 ssize_t bytes = 0;
708 750
709 if (count < sizeof(attributes)) 751 if (count < sizeof(attributes))
@@ -722,28 +764,18 @@ static ssize_t efivarfs_file_write(struct file *file,
722 * amounts of memory. Pick a default size of 64K if 764 * amounts of memory. Pick a default size of 64K if
723 * QueryVariableInfo() isn't supported by the firmware. 765 * QueryVariableInfo() isn't supported by the firmware.
724 */ 766 */
725 spin_lock_irq(&efivars->lock);
726
727 if (!efivars->ops->query_variable_info)
728 status = EFI_UNSUPPORTED;
729 else {
730 const struct efivar_operations *fops = efivars->ops;
731 status = fops->query_variable_info(attributes, &storage_size,
732 &remaining_size, &max_size);
733 }
734 767
735 spin_unlock_irq(&efivars->lock); 768 varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
769 status = check_var_size(efivars, attributes, varsize);
736 770
737 if (status != EFI_SUCCESS) { 771 if (status != EFI_SUCCESS) {
738 if (status != EFI_UNSUPPORTED) 772 if (status != EFI_UNSUPPORTED)
739 return efi_status_to_err(status); 773 return efi_status_to_err(status);
740 774
741 remaining_size = 65536; 775 if (datasize > 65536)
776 return -ENOSPC;
742 } 777 }
743 778
744 if (datasize > remaining_size)
745 return -ENOSPC;
746
747 data = kmalloc(datasize, GFP_KERNEL); 779 data = kmalloc(datasize, GFP_KERNEL);
748 if (!data) 780 if (!data)
749 return -ENOMEM; 781 return -ENOMEM;
@@ -765,6 +797,19 @@ static ssize_t efivarfs_file_write(struct file *file,
765 */ 797 */
766 spin_lock_irq(&efivars->lock); 798 spin_lock_irq(&efivars->lock);
767 799
800 /*
801 * Ensure that the available space hasn't shrunk below the safe level
802 */
803
804 status = check_var_size_locked(efivars, attributes, varsize);
805
806 if (status != EFI_SUCCESS && status != EFI_UNSUPPORTED) {
807 spin_unlock_irq(&efivars->lock);
808 kfree(data);
809
810 return efi_status_to_err(status);
811 }
812
768 status = efivars->ops->set_variable(var->var.VariableName, 813 status = efivars->ops->set_variable(var->var.VariableName,
769 &var->var.VendorGuid, 814 &var->var.VendorGuid,
770 attributes, datasize, 815 attributes, datasize,
@@ -929,8 +974,8 @@ static bool efivarfs_valid_name(const char *str, int len)
929 if (len < GUID_LEN + 2) 974 if (len < GUID_LEN + 2)
930 return false; 975 return false;
931 976
932 /* GUID should be right after the first '-' */ 977 /* GUID must be preceded by a '-' */
933 if (s - 1 != strchr(str, '-')) 978 if (*(s - 1) != '-')
934 return false; 979 return false;
935 980
936 /* 981 /*
@@ -1118,15 +1163,22 @@ static struct dentry_operations efivarfs_d_ops = {
1118 1163
1119static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) 1164static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
1120{ 1165{
1166 struct dentry *d;
1121 struct qstr q; 1167 struct qstr q;
1168 int err;
1122 1169
1123 q.name = name; 1170 q.name = name;
1124 q.len = strlen(name); 1171 q.len = strlen(name);
1125 1172
1126 if (efivarfs_d_hash(NULL, NULL, &q)) 1173 err = efivarfs_d_hash(NULL, NULL, &q);
1127 return NULL; 1174 if (err)
1175 return ERR_PTR(err);
1176
1177 d = d_alloc(parent, &q);
1178 if (d)
1179 return d;
1128 1180
1129 return d_alloc(parent, &q); 1181 return ERR_PTR(-ENOMEM);
1130} 1182}
1131 1183
1132static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) 1184static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
@@ -1136,6 +1188,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
1136 struct efivar_entry *entry, *n; 1188 struct efivar_entry *entry, *n;
1137 struct efivars *efivars = &__efivars; 1189 struct efivars *efivars = &__efivars;
1138 char *name; 1190 char *name;
1191 int err = -ENOMEM;
1139 1192
1140 efivarfs_sb = sb; 1193 efivarfs_sb = sb;
1141 1194
@@ -1186,8 +1239,10 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
1186 goto fail_name; 1239 goto fail_name;
1187 1240
1188 dentry = efivarfs_alloc_dentry(root, name); 1241 dentry = efivarfs_alloc_dentry(root, name);
1189 if (!dentry) 1242 if (IS_ERR(dentry)) {
1243 err = PTR_ERR(dentry);
1190 goto fail_inode; 1244 goto fail_inode;
1245 }
1191 1246
1192 /* copied by the above to local storage in the dentry. */ 1247 /* copied by the above to local storage in the dentry. */
1193 kfree(name); 1248 kfree(name);
@@ -1214,7 +1269,7 @@ fail_inode:
1214fail_name: 1269fail_name:
1215 kfree(name); 1270 kfree(name);
1216fail: 1271fail:
1217 return -ENOMEM; 1272 return err;
1218} 1273}
1219 1274
1220static struct dentry *efivarfs_mount(struct file_system_type *fs_type, 1275static struct dentry *efivarfs_mount(struct file_system_type *fs_type,
@@ -1234,6 +1289,7 @@ static struct file_system_type efivarfs_type = {
1234 .mount = efivarfs_mount, 1289 .mount = efivarfs_mount,
1235 .kill_sb = efivarfs_kill_sb, 1290 .kill_sb = efivarfs_kill_sb,
1236}; 1291};
1292MODULE_ALIAS_FS("efivarfs");
1237 1293
1238/* 1294/*
1239 * Handle negative dentry. 1295 * Handle negative dentry.
@@ -1345,7 +1401,6 @@ static int efi_pstore_write(enum pstore_type_id type,
1345 efi_guid_t vendor = LINUX_EFI_CRASH_GUID; 1401 efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
1346 struct efivars *efivars = psi->data; 1402 struct efivars *efivars = psi->data;
1347 int i, ret = 0; 1403 int i, ret = 0;
1348 u64 storage_space, remaining_space, max_variable_size;
1349 efi_status_t status = EFI_NOT_FOUND; 1404 efi_status_t status = EFI_NOT_FOUND;
1350 unsigned long flags; 1405 unsigned long flags;
1351 1406
@@ -1365,11 +1420,11 @@ static int efi_pstore_write(enum pstore_type_id type,
1365 * size: a size of logging data 1420 * size: a size of logging data
1366 * DUMP_NAME_LEN * 2: a maximum size of variable name 1421 * DUMP_NAME_LEN * 2: a maximum size of variable name
1367 */ 1422 */
1368 status = efivars->ops->query_variable_info(PSTORE_EFI_ATTRIBUTES, 1423
1369 &storage_space, 1424 status = check_var_size_locked(efivars, PSTORE_EFI_ATTRIBUTES,
1370 &remaining_space, 1425 size + DUMP_NAME_LEN * 2);
1371 &max_variable_size); 1426
1372 if (status || remaining_space < size + DUMP_NAME_LEN * 2) { 1427 if (status) {
1373 spin_unlock_irqrestore(&efivars->lock, flags); 1428 spin_unlock_irqrestore(&efivars->lock, flags);
1374 *id = part; 1429 *id = part;
1375 return -ENOSPC; 1430 return -ENOSPC;
@@ -1544,6 +1599,14 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
1544 return -EINVAL; 1599 return -EINVAL;
1545 } 1600 }
1546 1601
1602 status = check_var_size_locked(efivars, new_var->Attributes,
1603 new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
1604
1605 if (status && status != EFI_UNSUPPORTED) {
1606 spin_unlock_irq(&efivars->lock);
1607 return efi_status_to_err(status);
1608 }
1609
1547 /* now *really* create the variable via EFI */ 1610 /* now *really* create the variable via EFI */
1548 status = efivars->ops->set_variable(new_var->VariableName, 1611 status = efivars->ops->set_variable(new_var->VariableName,
1549 &new_var->VendorGuid, 1612 &new_var->VendorGuid,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c5b8c81b9440..0a8eceb75902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
379 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ 379 INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
380 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ 380 INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
381 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ 381 INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
382 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ 382 INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
383 INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
383 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ 384 INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
384 INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ 385 INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
385 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ 386 INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
386 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ 387 INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
387 INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ 388 INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
388 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ 389 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
389 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ 390 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
390 INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
391 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 391 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
392 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 392 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
393 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 393 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
495 intel_modeset_disable(dev); 495 intel_modeset_disable(dev);
496 496
497 drm_irq_uninstall(dev); 497 drm_irq_uninstall(dev);
498 dev_priv->enable_hotplug_processing = false;
498 } 499 }
499 500
500 i915_save_state(dev); 501 i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
568 error = i915_gem_init_hw(dev); 569 error = i915_gem_init_hw(dev);
569 mutex_unlock(&dev->struct_mutex); 570 mutex_unlock(&dev->struct_mutex);
570 571
572 /* We need working interrupts for modeset enabling ... */
573 drm_irq_install(dev);
574
571 intel_modeset_init_hw(dev); 575 intel_modeset_init_hw(dev);
572 intel_modeset_setup_hw_state(dev, false); 576 intel_modeset_setup_hw_state(dev, false);
573 drm_irq_install(dev); 577
578 /*
579 * ... but also need to make sure that hotplug processing
580 * doesn't cause havoc. Like in the driver load code we don't
581 * bother with the tiny race here where we might loose hotplug
582 * notifications.
583 * */
574 intel_hpd_init(dev); 584 intel_hpd_init(dev);
585 dev_priv->enable_hotplug_processing = true;
575 } 586 }
576 587
577 intel_opregion_init(dev); 588 intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2cd97d1cc920..3c7bb0410b51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
701{ 701{
702 struct drm_device *dev = (struct drm_device *) arg; 702 struct drm_device *dev = (struct drm_device *) arg;
703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
704 u32 de_iir, gt_iir, de_ier, pm_iir; 704 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
705 irqreturn_t ret = IRQ_NONE; 705 irqreturn_t ret = IRQ_NONE;
706 int i; 706 int i;
707 707
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
711 de_ier = I915_READ(DEIER); 711 de_ier = I915_READ(DEIER);
712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
713 713
714 /* Disable south interrupts. We'll only write to SDEIIR once, so further
715 * interrupts will will be stored on its back queue, and then we'll be
716 * able to process them after we restore SDEIER (as soon as we restore
717 * it, we'll get an interrupt if SDEIIR still has something to process
718 * due to its back queue). */
719 sde_ier = I915_READ(SDEIER);
720 I915_WRITE(SDEIER, 0);
721 POSTING_READ(SDEIER);
722
714 gt_iir = I915_READ(GTIIR); 723 gt_iir = I915_READ(GTIIR);
715 if (gt_iir) { 724 if (gt_iir) {
716 snb_gt_irq_handler(dev, dev_priv, gt_iir); 725 snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
759 768
760 I915_WRITE(DEIER, de_ier); 769 I915_WRITE(DEIER, de_ier);
761 POSTING_READ(DEIER); 770 POSTING_READ(DEIER);
771 I915_WRITE(SDEIER, sde_ier);
772 POSTING_READ(SDEIER);
762 773
763 return ret; 774 return ret;
764} 775}
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
778 struct drm_device *dev = (struct drm_device *) arg; 789 struct drm_device *dev = (struct drm_device *) arg;
779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 790 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
780 int ret = IRQ_NONE; 791 int ret = IRQ_NONE;
781 u32 de_iir, gt_iir, de_ier, pm_iir; 792 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
782 793
783 atomic_inc(&dev_priv->irq_received); 794 atomic_inc(&dev_priv->irq_received);
784 795
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
787 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 798 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
788 POSTING_READ(DEIER); 799 POSTING_READ(DEIER);
789 800
801 /* Disable south interrupts. We'll only write to SDEIIR once, so further
802 * interrupts will will be stored on its back queue, and then we'll be
803 * able to process them after we restore SDEIER (as soon as we restore
804 * it, we'll get an interrupt if SDEIIR still has something to process
805 * due to its back queue). */
806 sde_ier = I915_READ(SDEIER);
807 I915_WRITE(SDEIER, 0);
808 POSTING_READ(SDEIER);
809
790 de_iir = I915_READ(DEIIR); 810 de_iir = I915_READ(DEIIR);
791 gt_iir = I915_READ(GTIIR); 811 gt_iir = I915_READ(GTIIR);
792 pm_iir = I915_READ(GEN6_PMIIR); 812 pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
849done: 869done:
850 I915_WRITE(DEIER, de_ier); 870 I915_WRITE(DEIER, de_ier);
851 POSTING_READ(DEIER); 871 POSTING_READ(DEIER);
872 I915_WRITE(SDEIER, sde_ier);
873 POSTING_READ(SDEIER);
852 874
853 return ret; 875 return ret;
854} 876}
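
The two hunks above apply the same save/mask/drain/restore sequence for the south display engine in both IRQ handlers. A minimal, self-contained sketch of that pattern, using hypothetical read_reg()/write_reg() helpers and a plain array in place of the I915_READ/I915_WRITE macros and real hardware registers:

#include <stdint.h>
#include <stdio.h>

enum { DEIER, SDEIER, NUM_REGS };               /* hypothetical register indices */
static uint32_t regs[NUM_REGS];

static uint32_t read_reg(int r)                 { return regs[r]; }
static void write_reg(int r, uint32_t v)        { regs[r] = v; }

#define MASTER_IRQ_CONTROL (1u << 31)           /* stand-in for DE_MASTER_IRQ_CONTROL */

static void display_irq_handler(void)
{
	uint32_t de_ier  = read_reg(DEIER);
	uint32_t sde_ier = read_reg(SDEIER);

	/* Mask the north and south sources so nothing new fires while the
	 * IIR registers are drained exactly once. */
	write_reg(DEIER, de_ier & ~MASTER_IRQ_CONTROL);
	write_reg(SDEIER, 0);

	/* ... read and acknowledge GTIIR, DEIIR, PMIIR and SDEIIR here ... */

	/* Restore the enables; if SDEIIR queued more events meanwhile,
	 * re-enabling SDEIER makes the interrupt fire again. */
	write_reg(DEIER, de_ier);
	write_reg(SDEIER, sde_ier);
}

int main(void)
{
	regs[DEIER] = MASTER_IRQ_CONTROL | 0x1;
	regs[SDEIER] = 0xffff;
	display_irq_handler();
	printf("DEIER=%#x SDEIER=%#x\n", regs[DEIER], regs[SDEIER]);
	return 0;
}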
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527b664d3434..848992f67d56 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1613,9 +1613,9 @@
1613#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 1613#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
1614#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1614#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1615#define ADPA_SETS_HVPOLARITY 0 1615#define ADPA_SETS_HVPOLARITY 0
1616#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1616#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
1617#define ADPA_VSYNC_CNTL_ENABLE 0 1617#define ADPA_VSYNC_CNTL_ENABLE 0
1618#define ADPA_HSYNC_CNTL_DISABLE (1<<10) 1618#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
1619#define ADPA_HSYNC_CNTL_ENABLE 0 1619#define ADPA_HSYNC_CNTL_ENABLE 0
1620#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) 1620#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
1621#define ADPA_VSYNC_ACTIVE_LOW 0 1621#define ADPA_VSYNC_ACTIVE_LOW 0
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 969d08c72d10..32a3693905ec 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
88 u32 temp; 88 u32 temp;
89 89
90 temp = I915_READ(crt->adpa_reg); 90 temp = I915_READ(crt->adpa_reg);
91 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 91 temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
92 temp &= ~ADPA_DAC_ENABLE; 92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp); 93 I915_WRITE(crt->adpa_reg, temp);
94} 94}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d64af5aa4a1c..8d0bac3c35d7 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1391 struct intel_dp *intel_dp = &intel_dig_port->dp; 1391 struct intel_dp *intel_dp = &intel_dig_port->dp;
1392 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 1392 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1393 enum port port = intel_dig_port->port; 1393 enum port port = intel_dig_port->port;
1394 bool wait;
1395 uint32_t val; 1394 uint32_t val;
1395 bool wait = false;
1396 1396
1397 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { 1397 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
1398 val = I915_READ(DDI_BUF_CTL(port)); 1398 val = I915_READ(DDI_BUF_CTL(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a05ac2c91ba2..287b42c9d1a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3604 */ 3604 */
3605} 3605}
3606 3606
3607/**
3608 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3609 * cursor plane briefly if not already running after enabling the display
3610 * plane.
3611 * This workaround avoids occasional blank screens when self refresh is
3612 * enabled.
3613 */
3614static void
3615g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3616{
3617 u32 cntl = I915_READ(CURCNTR(pipe));
3618
3619 if ((cntl & CURSOR_MODE) == 0) {
3620 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3621
3622 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3623 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3624 intel_wait_for_vblank(dev_priv->dev, pipe);
3625 I915_WRITE(CURCNTR(pipe), cntl);
3626 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3627 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3628 }
3629}
3630
3607static void i9xx_crtc_enable(struct drm_crtc *crtc) 3631static void i9xx_crtc_enable(struct drm_crtc *crtc)
3608{ 3632{
3609 struct drm_device *dev = crtc->dev; 3633 struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3629 3653
3630 intel_enable_pipe(dev_priv, pipe, false); 3654 intel_enable_pipe(dev_priv, pipe, false);
3631 intel_enable_plane(dev_priv, plane, pipe); 3655 intel_enable_plane(dev_priv, plane, pipe);
3656 if (IS_G4X(dev))
3657 g4x_fixup_plane(dev_priv, pipe);
3632 3658
3633 intel_crtc_load_lut(crtc); 3659 intel_crtc_load_lut(crtc);
3634 intel_update_fbc(dev); 3660 intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7256{ 7282{
7257 struct drm_device *dev = crtc->dev; 7283 struct drm_device *dev = crtc->dev;
7258 struct drm_i915_private *dev_priv = dev->dev_private; 7284 struct drm_i915_private *dev_priv = dev->dev_private;
7259 struct intel_framebuffer *intel_fb; 7285 struct drm_framebuffer *old_fb = crtc->fb;
7260 struct drm_i915_gem_object *obj; 7286 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
7261 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7287 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7262 struct intel_unpin_work *work; 7288 struct intel_unpin_work *work;
7263 unsigned long flags; 7289 unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7282 7308
7283 work->event = event; 7309 work->event = event;
7284 work->crtc = crtc; 7310 work->crtc = crtc;
7285 intel_fb = to_intel_framebuffer(crtc->fb); 7311 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
7286 work->old_fb_obj = intel_fb->obj;
7287 INIT_WORK(&work->work, intel_unpin_work_fn); 7312 INIT_WORK(&work->work, intel_unpin_work_fn);
7288 7313
7289 ret = drm_vblank_get(dev, intel_crtc->pipe); 7314 ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7303 intel_crtc->unpin_work = work; 7328 intel_crtc->unpin_work = work;
7304 spin_unlock_irqrestore(&dev->event_lock, flags); 7329 spin_unlock_irqrestore(&dev->event_lock, flags);
7305 7330
7306 intel_fb = to_intel_framebuffer(fb);
7307 obj = intel_fb->obj;
7308
7309 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 7331 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
7310 flush_workqueue(dev_priv->wq); 7332 flush_workqueue(dev_priv->wq);
7311 7333
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7340 7362
7341cleanup_pending: 7363cleanup_pending:
7342 atomic_dec(&intel_crtc->unpin_work_count); 7364 atomic_dec(&intel_crtc->unpin_work_count);
7365 crtc->fb = old_fb;
7343 drm_gem_object_unreference(&work->old_fb_obj->base); 7366 drm_gem_object_unreference(&work->old_fb_obj->base);
7344 drm_gem_object_unreference(&obj->base); 7367 drm_gem_object_unreference(&obj->base);
7345 mutex_unlock(&dev->struct_mutex); 7368 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f61cb7998c72..6f728e5ee793 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
353 353
354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
355 if (has_aux_irq) 355 if (has_aux_irq)
356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); 356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
357 msecs_to_jiffies(10));
357 else 358 else
358 done = wait_for_atomic(C, 10) == 0; 359 done = wait_for_atomic(C, 10) == 0;
359 if (!done) 360 if (!done)
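
Worth noting for the hunk above: the timeout argument of wait_event_timeout() is in jiffies, so the bare 10 waited ten scheduler ticks (roughly 100 ms at HZ=100, 40 ms at HZ=250) instead of the 10 ms used by the wait_for_atomic() fallback; msecs_to_jiffies(10) makes both paths wait the same wall-clock time.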
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 61fee7fcdc2c..a1794c6df1bf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
2574 I915_WRITE(GEN6_RC_SLEEP, 0); 2574 I915_WRITE(GEN6_RC_SLEEP, 0);
2575 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 2575 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2576 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 2576 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2577 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 2577 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
2578 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 2578 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2579 2579
2580 /* Check if we are enabling RC6 */ 2580 /* Check if we are enabling RC6 */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033eae0a..4d932c46725d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -112,7 +112,6 @@ struct mga_framebuffer {
112struct mga_fbdev { 112struct mga_fbdev {
113 struct drm_fb_helper helper; 113 struct drm_fb_helper helper;
114 struct mga_framebuffer mfb; 114 struct mga_framebuffer mfb;
115 struct list_head fbdev_list;
116 void *sysram; 115 void *sysram;
117 int size; 116 int size;
118 struct ttm_bo_kmap_obj mapping; 117 struct ttm_bo_kmap_obj mapping;
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 5a88ec51b513..d3dcf54e6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
92 int ret; 92 int ret;
93 int data, clock; 93 int data, clock;
94 94
95 WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
95 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff); 96 WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
96 WREG_DAC(MGA1064_GEN_IO_CTL, 0); 97 WREG_DAC(MGA1064_GEN_IO_CTL, 0);
97 98
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d3d99a28ddef..a274b9906ef8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
1406static int mga_vga_mode_valid(struct drm_connector *connector, 1406static int mga_vga_mode_valid(struct drm_connector *connector,
1407 struct drm_display_mode *mode) 1407 struct drm_display_mode *mode)
1408{ 1408{
1409 struct drm_device *dev = connector->dev;
1410 struct mga_device *mdev = (struct mga_device*)dev->dev_private;
1411 struct mga_fbdev *mfbdev = mdev->mfbdev;
1412 struct drm_fb_helper *fb_helper = &mfbdev->helper;
1413 struct drm_fb_helper_connector *fb_helper_conn = NULL;
1414 int bpp = 32;
1415 int i = 0;
1416
1409 /* FIXME: Add bandwidth and g200se limitations */ 1417 /* FIXME: Add bandwidth and g200se limitations */
1410 1418
1411 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || 1419 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1415 return MODE_BAD; 1423 return MODE_BAD;
1416 } 1424 }
1417 1425
1426 /* Validate the mode input by the user */
1427 for (i = 0; i < fb_helper->connector_count; i++) {
1428 if (fb_helper->connector_info[i]->connector == connector) {
1429 /* Found the helper for this connector */
1430 fb_helper_conn = fb_helper->connector_info[i];
1431 if (fb_helper_conn->cmdline_mode.specified) {
1432 if (fb_helper_conn->cmdline_mode.bpp_specified) {
1433 bpp = fb_helper_conn->cmdline_mode.bpp;
1434 }
1435 }
1436 }
1437 }
1438
1439 if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
1440 if (fb_helper_conn)
1441 fb_helper_conn->cmdline_mode.specified = false;
1442 return MODE_BAD;
1443 }
1444
1418 return MODE_OK; 1445 return MODE_OK;
1419} 1446}
1420 1447
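
As a worked example of the new VRAM check: at the default 32 bpp a 1920x1200 mode needs 1920 * 1200 * 4 = 9,216,000 bytes of scanout memory, so it is rejected as MODE_BAD on a board with 8 MiB (8,388,608 bytes) of VRAM but accepted with 16 MiB; a cmdline bpp of 16 halves the requirement to 4,608,000 bytes.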
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 61cec0f6ff1c..4857f913efdd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
350 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918); 350 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
351 } 351 }
352 352
353 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918); 353 nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
354 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800)); 354 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
355} 355}
356 356
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2cc1e6a5eb6a..9c41b58d57e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
869 init->offset += 2; 869 init->offset += 2;
870 870
871 init_wr32(init, dreg, idata); 871 init_wr32(init, dreg, idata);
872 init_mask(init, creg, ~mask, data | idata); 872 init_mask(init, creg, ~mask, data | iaddr);
873 } 873 }
874} 874}
875 875
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a114a0ed7e98..2e98e8a3f1aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
142 /* drop port's i2c subdev refcount, i2c handles this itself */ 142 /* drop port's i2c subdev refcount, i2c handles this itself */
143 if (ret == 0) { 143 if (ret == 0) {
144 list_add_tail(&port->head, &i2c->ports); 144 list_add_tail(&port->head, &i2c->ports);
145 atomic_dec(&parent->refcount);
145 atomic_dec(&engine->refcount); 146 atomic_dec(&engine->refcount);
146 } 147 }
147 148
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index d28430cd2ba6..6e7a55f93a85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
47 if (drm->agp.stat == UNKNOWN) { 47 if (drm->agp.stat == UNKNOWN) {
48 if (!nouveau_agpmode) 48 if (!nouveau_agpmode)
49 return false; 49 return false;
50#ifdef __powerpc__
51 /* Disable AGP by default on all PowerPC machines for
52 * now -- At least some UniNorth-2 AGP bridges are
53 * known to be broken: DMA from the host to the card
54 * works just fine, but writeback from the card to the
55 * host goes straight to memory untranslated bypassing
56 * the GATT somehow, making them quite painful to deal
57 * with...
58 */
59 if (nouveau_agpmode == -1)
60 return false;
61#endif
50 return true; 62 return true;
51 } 63 }
52 64
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a6237c9cbbc3..87a5a56ed358 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -55,9 +55,9 @@
55 55
56/* offsets in shared sync bo of various structures */ 56/* offsets in shared sync bo of various structures */
57#define EVO_SYNC(c, o) ((c) * 0x0100 + (o)) 57#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
58#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00) 58#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
59#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00) 59#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
60#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10) 60#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
61 61
62#define EVO_CORE_HANDLE (0xd1500000) 62#define EVO_CORE_HANDLE (0xd1500000)
63#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i)) 63#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
@@ -341,10 +341,8 @@ struct nv50_curs {
341 341
342struct nv50_sync { 342struct nv50_sync {
343 struct nv50_dmac base; 343 struct nv50_dmac base;
344 struct { 344 u32 addr;
345 u32 offset; 345 u32 data;
346 u16 value;
347 } sem;
348}; 346};
349 347
350struct nv50_ovly { 348struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
471 return nv50_disp(dev)->sync; 469 return nv50_disp(dev)->sync;
472} 470}
473 471
472struct nv50_display_flip {
473 struct nv50_disp *disp;
474 struct nv50_sync *chan;
475};
476
477static bool
478nv50_display_flip_wait(void *data)
479{
480 struct nv50_display_flip *flip = data;
481 if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
482 flip->chan->data)
483 return true;
484 usleep_range(1, 2);
485 return false;
486}
487
474void 488void
475nv50_display_flip_stop(struct drm_crtc *crtc) 489nv50_display_flip_stop(struct drm_crtc *crtc)
476{ 490{
477 struct nv50_sync *sync = nv50_sync(crtc); 491 struct nouveau_device *device = nouveau_dev(crtc->dev);
492 struct nv50_display_flip flip = {
493 .disp = nv50_disp(crtc->dev),
494 .chan = nv50_sync(crtc),
495 };
478 u32 *push; 496 u32 *push;
479 497
480 push = evo_wait(sync, 8); 498 push = evo_wait(flip.chan, 8);
481 if (push) { 499 if (push) {
482 evo_mthd(push, 0x0084, 1); 500 evo_mthd(push, 0x0084, 1);
483 evo_data(push, 0x00000000); 501 evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
487 evo_data(push, 0x00000000); 505 evo_data(push, 0x00000000);
488 evo_mthd(push, 0x0080, 1); 506 evo_mthd(push, 0x0080, 1);
489 evo_data(push, 0x00000000); 507 evo_data(push, 0x00000000);
490 evo_kick(push, sync); 508 evo_kick(push, flip.chan);
491 } 509 }
510
511 nv_wait_cb(device, nv50_display_flip_wait, &flip);
492} 512}
493 513
494int 514int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
496 struct nouveau_channel *chan, u32 swap_interval) 516 struct nouveau_channel *chan, u32 swap_interval)
497{ 517{
498 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 518 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
499 struct nv50_disp *disp = nv50_disp(crtc->dev);
500 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 519 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
501 struct nv50_sync *sync = nv50_sync(crtc); 520 struct nv50_sync *sync = nv50_sync(crtc);
521 int head = nv_crtc->index, ret;
502 u32 *push; 522 u32 *push;
503 int ret;
504 523
505 swap_interval <<= 4; 524 swap_interval <<= 4;
506 if (swap_interval == 0) 525 if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
510 if (unlikely(push == NULL)) 529 if (unlikely(push == NULL))
511 return -EBUSY; 530 return -EBUSY;
512 531
513 /* synchronise with the rendering channel, if necessary */ 532 if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
514 if (likely(chan)) { 533 ret = RING_SPACE(chan, 8);
534 if (ret)
535 return ret;
536
537 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
538 OUT_RING (chan, NvEvoSema0 + head);
539 OUT_RING (chan, sync->addr ^ 0x10);
540 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
541 OUT_RING (chan, sync->data + 1);
542 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
543 OUT_RING (chan, sync->addr);
544 OUT_RING (chan, sync->data);
545 } else
546 if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
547 u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
548 ret = RING_SPACE(chan, 12);
549 if (ret)
550 return ret;
551
552 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
553 OUT_RING (chan, chan->vram);
554 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
555 OUT_RING (chan, upper_32_bits(addr ^ 0x10));
556 OUT_RING (chan, lower_32_bits(addr ^ 0x10));
557 OUT_RING (chan, sync->data + 1);
558 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
559 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
560 OUT_RING (chan, upper_32_bits(addr));
561 OUT_RING (chan, lower_32_bits(addr));
562 OUT_RING (chan, sync->data);
563 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
564 } else
565 if (chan) {
566 u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
515 ret = RING_SPACE(chan, 10); 567 ret = RING_SPACE(chan, 10);
516 if (ret) 568 if (ret)
517 return ret; 569 return ret;
518 570
519 if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) { 571 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
520 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); 572 OUT_RING (chan, upper_32_bits(addr ^ 0x10));
521 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 573 OUT_RING (chan, lower_32_bits(addr ^ 0x10));
522 OUT_RING (chan, sync->sem.offset); 574 OUT_RING (chan, sync->data + 1);
523 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); 575 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
524 OUT_RING (chan, 0xf00d0000 | sync->sem.value); 576 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
525 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2); 577 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
526 OUT_RING (chan, sync->sem.offset ^ 0x10); 578 OUT_RING (chan, upper_32_bits(addr));
527 OUT_RING (chan, 0x74b1e000); 579 OUT_RING (chan, lower_32_bits(addr));
528 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 580 OUT_RING (chan, sync->data);
529 OUT_RING (chan, NvSema); 581 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
530 } else 582 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
531 if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 583 }
532 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
533 offset += sync->sem.offset;
534
535 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
536 OUT_RING (chan, upper_32_bits(offset));
537 OUT_RING (chan, lower_32_bits(offset));
538 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
539 OUT_RING (chan, 0x00000002);
540 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
541 OUT_RING (chan, upper_32_bits(offset));
542 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
543 OUT_RING (chan, 0x74b1e000);
544 OUT_RING (chan, 0x00000001);
545 } else {
546 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
547 offset += sync->sem.offset;
548
549 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
550 OUT_RING (chan, upper_32_bits(offset));
551 OUT_RING (chan, lower_32_bits(offset));
552 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
553 OUT_RING (chan, 0x00001002);
554 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
555 OUT_RING (chan, upper_32_bits(offset));
556 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
557 OUT_RING (chan, 0x74b1e000);
558 OUT_RING (chan, 0x00001001);
559 }
560 584
585 if (chan) {
586 sync->addr ^= 0x10;
587 sync->data++;
561 FIRE_RING (chan); 588 FIRE_RING (chan);
562 } else { 589 } else {
563 nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
564 0xf00d0000 | sync->sem.value);
565 evo_sync(crtc->dev); 590 evo_sync(crtc->dev);
566 } 591 }
567 592
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
575 evo_data(push, 0x40000000); 600 evo_data(push, 0x40000000);
576 } 601 }
577 evo_mthd(push, 0x0088, 4); 602 evo_mthd(push, 0x0088, 4);
578 evo_data(push, sync->sem.offset); 603 evo_data(push, sync->addr);
579 evo_data(push, 0xf00d0000 | sync->sem.value); 604 evo_data(push, sync->data++);
580 evo_data(push, 0x74b1e000); 605 evo_data(push, sync->data);
581 evo_data(push, NvEvoSync); 606 evo_data(push, NvEvoSync);
582 evo_mthd(push, 0x00a0, 2); 607 evo_mthd(push, 0x00a0, 2);
583 evo_data(push, 0x00000000); 608 evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
605 evo_mthd(push, 0x0080, 1); 630 evo_mthd(push, 0x0080, 1);
606 evo_data(push, 0x00000000); 631 evo_data(push, 0x00000000);
607 evo_kick(push, sync); 632 evo_kick(push, sync);
608
609 sync->sem.offset ^= 0x10;
610 sync->sem.value++;
611 return 0; 633 return 0;
612} 634}
613 635
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1379 if (ret) 1401 if (ret)
1380 goto out; 1402 goto out;
1381 1403
1382 head->sync.sem.offset = EVO_SYNC(1 + index, 0x00); 1404 head->sync.addr = EVO_FLIP_SEM0(index);
1405 head->sync.data = 0x00000000;
1383 1406
1384 /* allocate overlay resources */ 1407 /* allocate overlay resources */
1385 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index, 1408 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
2112int 2135int
2113nv50_display_init(struct drm_device *dev) 2136nv50_display_init(struct drm_device *dev)
2114{ 2137{
2115 u32 *push = evo_wait(nv50_mast(dev), 32); 2138 struct nv50_disp *disp = nv50_disp(dev);
2116 if (push) { 2139 struct drm_crtc *crtc;
2117 evo_mthd(push, 0x0088, 1); 2140 u32 *push;
2118 evo_data(push, NvEvoSync); 2141
2119 evo_kick(push, nv50_mast(dev)); 2142 push = evo_wait(nv50_mast(dev), 32);
2120 return 0; 2143 if (!push)
2144 return -EBUSY;
2145
2146 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2147 struct nv50_sync *sync = nv50_sync(crtc);
2148 nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
2121 } 2149 }
2122 2150
2123 return -EBUSY; 2151 evo_mthd(push, 0x0088, 1);
2152 evo_data(push, NvEvoSync);
2153 evo_kick(push, nv50_mast(dev));
2154 return 0;
2124} 2155}
2125 2156
2126void 2157void
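
A small, compilable illustration of the new per-CRTC semaphore addressing: with the EVO_FLIP_SEM0/1 macros from the hunk above, each head owns two words 0x10 apart in the shared sync buffer, and the addr ^= 0x10 bookkeeping simply toggles between them (the loop bound of two heads below is illustrative):

#include <stdio.h>

#define EVO_SYNC(c, o)    ((c) * 0x0100 + (o))
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)

int main(void)
{
	for (int crtc = 0; crtc < 2; crtc++)
		printf("crtc %d: sem0=%#x sem1=%#x sem0^0x10=%#x\n",
		       crtc, EVO_FLIP_SEM0(crtc), EVO_FLIP_SEM1(crtc),
		       EVO_FLIP_SEM0(crtc) ^ 0x10);
	return 0;
}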
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3c38ea46531c..305a657bf215 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
2438 if (tmp & L2_BUSY) 2438 if (tmp & L2_BUSY)
2439 reset_mask |= RADEON_RESET_VMC; 2439 reset_mask |= RADEON_RESET_VMC;
2440 2440
2441 /* Skip MC reset as it's most likely not hung, just busy */
2442 if (reset_mask & RADEON_RESET_MC) {
2443 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2444 reset_mask &= ~RADEON_RESET_MC;
2445 }
2446
2441 return reset_mask; 2447 return reset_mask;
2442} 2448}
2443 2449
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 99fb13286fd0..eb8ac315f92f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
834 __func__, __LINE__, toffset, surf.base_align); 834 __func__, __LINE__, toffset, surf.base_align);
835 return -EINVAL; 835 return -EINVAL;
836 } 836 }
837 if (moffset & (surf.base_align - 1)) { 837 if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
838 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", 838 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
839 __func__, __LINE__, moffset, surf.base_align); 839 __func__, __LINE__, moffset, surf.base_align);
840 return -EINVAL; 840 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7cead763be9e..d4c633e12863 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1381 if (tmp & L2_BUSY) 1381 if (tmp & L2_BUSY)
1382 reset_mask |= RADEON_RESET_VMC; 1382 reset_mask |= RADEON_RESET_VMC;
1383 1383
1384 /* Skip MC reset as it's most likely not hung, just busy */
1385 if (reset_mask & RADEON_RESET_MC) {
1386 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1387 reset_mask &= ~RADEON_RESET_MC;
1388 }
1389
1384 return reset_mask; 1390 return reset_mask;
1385} 1391}
1386 1392
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6d4b5611daf4..0740db3fcd22 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1394 if (r600_is_display_hung(rdev)) 1394 if (r600_is_display_hung(rdev))
1395 reset_mask |= RADEON_RESET_DISPLAY; 1395 reset_mask |= RADEON_RESET_DISPLAY;
1396 1396
1397 /* Skip MC reset as it's most likely not hung, just busy */
1398 if (reset_mask & RADEON_RESET_MC) {
1399 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1400 reset_mask &= ~RADEON_RESET_MC;
1401 }
1402
1397 return reset_mask; 1403 return reset_mask;
1398} 1404}
1399 1405
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e403bdda58f..78edadc9e86b 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
970 found = 1; 970 found = 1;
971 } 971 }
972 972
973 /* quirks */
974 /* Radeon 9100 (R200) */
975 if ((dev->pdev->device == 0x514D) &&
976 (dev->pdev->subsystem_vendor == 0x174B) &&
977 (dev->pdev->subsystem_device == 0x7149)) {
978 /* vbios value is bad, use the default */
979 found = 0;
980 }
981
973 if (!found) /* fallback to defaults */ 982 if (!found) /* fallback to defaults */
974 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); 983 radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
975 984
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 167758488ed6..66a7f0fd9620 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -70,9 +70,10 @@
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA 70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support 71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
72 * 2.29.0 - R500 FP16 color clear registers 72 * 2.29.0 - R500 FP16 color clear registers
73 * 2.30.0 - fix for FMASK texturing
73 */ 74 */
74#define KMS_DRIVER_MAJOR 2 75#define KMS_DRIVER_MAJOR 2
75#define KMS_DRIVER_MINOR 29 76#define KMS_DRIVER_MINOR 30
76#define KMS_DRIVER_PATCHLEVEL 0 77#define KMS_DRIVER_PATCHLEVEL 0
77int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 78int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
78int radeon_driver_unload_kms(struct drm_device *dev); 79int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 90374dd77960..48f80cd42d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
400{ 400{
401 unsigned long irqflags; 401 unsigned long irqflags;
402 402
403 if (!rdev->ddev->irq_enabled)
404 return;
405
403 spin_lock_irqsave(&rdev->irq.lock, irqflags); 406 spin_lock_irqsave(&rdev->irq.lock, irqflags);
404 rdev->irq.afmt[block] = true; 407 rdev->irq.afmt[block] = true;
405 radeon_irq_set(rdev); 408 radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
419{ 422{
420 unsigned long irqflags; 423 unsigned long irqflags;
421 424
425 if (!rdev->ddev->irq_enabled)
426 return;
427
422 spin_lock_irqsave(&rdev->irq.lock, irqflags); 428 spin_lock_irqsave(&rdev->irq.lock, irqflags);
423 rdev->irq.afmt[block] = false; 429 rdev->irq.afmt[block] = false;
424 radeon_irq_set(rdev); 430 radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
438 unsigned long irqflags; 444 unsigned long irqflags;
439 int i; 445 int i;
440 446
447 if (!rdev->ddev->irq_enabled)
448 return;
449
441 spin_lock_irqsave(&rdev->irq.lock, irqflags); 450 spin_lock_irqsave(&rdev->irq.lock, irqflags);
442 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) 451 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
443 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i)); 452 rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
458 unsigned long irqflags; 467 unsigned long irqflags;
459 int i; 468 int i;
460 469
470 if (!rdev->ddev->irq_enabled)
471 return;
472
461 spin_lock_irqsave(&rdev->irq.lock, irqflags); 473 spin_lock_irqsave(&rdev->irq.lock, irqflags);
462 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i) 474 for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
463 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i)); 475 rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951eb..9128120da044 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
2284 if (tmp & L2_BUSY) 2284 if (tmp & L2_BUSY)
2285 reset_mask |= RADEON_RESET_VMC; 2285 reset_mask |= RADEON_RESET_VMC;
2286 2286
2287 /* Skip MC reset as it's most likely not hung, just busy */
2288 if (reset_mask & RADEON_RESET_MC) {
2289 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
2290 reset_mask &= ~RADEON_RESET_MC;
2291 }
2292
2287 return reset_mask; 2293 return reset_mask;
2288} 2294}
2289 2295
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df0658..be1daf7344d3 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select DRM_HDMI
8 select FB_CFB_FILLRECT 7 select FB_CFB_FILLRECT
9 select FB_CFB_COPYAREA 8 select FB_CFB_COPYAREA
10 select FB_CFB_IMAGEBLIT 9 select FB_CFB_IMAGEBLIT
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 9500f2f3f8fe..8758f38c948c 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -459,19 +459,25 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
459 struct dj_report *dj_report) 459 struct dj_report *dj_report)
460{ 460{
461 struct hid_device *hdev = djrcv_dev->hdev; 461 struct hid_device *hdev = djrcv_dev->hdev;
462 int sent_bytes; 462 struct hid_report *report;
463 struct hid_report_enum *output_report_enum;
464 u8 *data = (u8 *)(&dj_report->device_index);
465 int i;
463 466
464 if (!hdev->hid_output_raw_report) { 467 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
465 dev_err(&hdev->dev, "%s:" 468 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
466 "hid_output_raw_report is null\n", __func__); 469
470 if (!report) {
471 dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
467 return -ENODEV; 472 return -ENODEV;
468 } 473 }
469 474
470 sent_bytes = hdev->hid_output_raw_report(hdev, (u8 *) dj_report, 475 for (i = 0; i < report->field[0]->report_count; i++)
471 sizeof(struct dj_report), 476 report->field[0]->value[i] = data[i];
472 HID_OUTPUT_REPORT); 477
478 usbhid_submit_report(hdev, report, USB_DIR_OUT);
473 479
474 return (sent_bytes < 0) ? sent_bytes : 0; 480 return 0;
475} 481}
476 482
477static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) 483static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 9652a2c92a24..a58de38e23d8 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -62,7 +62,7 @@ struct ltc2978_data {
62 int temp_min, temp_max; 62 int temp_min, temp_max;
63 int vout_min[8], vout_max[8]; 63 int vout_min[8], vout_max[8];
64 int iout_max[2]; 64 int iout_max[2];
65 int temp2_max[2]; 65 int temp2_max;
66 struct pmbus_driver_info info; 66 struct pmbus_driver_info info;
67}; 67};
68 68
@@ -204,10 +204,9 @@ static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
204 ret = pmbus_read_word_data(client, page, 204 ret = pmbus_read_word_data(client, page,
205 LTC3880_MFR_TEMPERATURE2_PEAK); 205 LTC3880_MFR_TEMPERATURE2_PEAK);
206 if (ret >= 0) { 206 if (ret >= 0) {
207 if (lin11_to_val(ret) 207 if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
208 > lin11_to_val(data->temp2_max[page])) 208 data->temp2_max = ret;
209 data->temp2_max[page] = ret; 209 ret = data->temp2_max;
210 ret = data->temp2_max[page];
211 } 210 }
212 break; 211 break;
213 case PMBUS_VIRT_READ_VIN_MIN: 212 case PMBUS_VIRT_READ_VIN_MIN:
@@ -248,11 +247,11 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
248 247
249 switch (reg) { 248 switch (reg) {
250 case PMBUS_VIRT_RESET_IOUT_HISTORY: 249 case PMBUS_VIRT_RESET_IOUT_HISTORY:
251 data->iout_max[page] = 0x7fff; 250 data->iout_max[page] = 0x7c00;
252 ret = ltc2978_clear_peaks(client, page, data->id); 251 ret = ltc2978_clear_peaks(client, page, data->id);
253 break; 252 break;
254 case PMBUS_VIRT_RESET_TEMP2_HISTORY: 253 case PMBUS_VIRT_RESET_TEMP2_HISTORY:
255 data->temp2_max[page] = 0x7fff; 254 data->temp2_max = 0x7c00;
256 ret = ltc2978_clear_peaks(client, page, data->id); 255 ret = ltc2978_clear_peaks(client, page, data->id);
257 break; 256 break;
258 case PMBUS_VIRT_RESET_VOUT_HISTORY: 257 case PMBUS_VIRT_RESET_VOUT_HISTORY:
@@ -262,12 +261,12 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
262 break; 261 break;
263 case PMBUS_VIRT_RESET_VIN_HISTORY: 262 case PMBUS_VIRT_RESET_VIN_HISTORY:
264 data->vin_min = 0x7bff; 263 data->vin_min = 0x7bff;
265 data->vin_max = 0; 264 data->vin_max = 0x7c00;
266 ret = ltc2978_clear_peaks(client, page, data->id); 265 ret = ltc2978_clear_peaks(client, page, data->id);
267 break; 266 break;
268 case PMBUS_VIRT_RESET_TEMP_HISTORY: 267 case PMBUS_VIRT_RESET_TEMP_HISTORY:
269 data->temp_min = 0x7bff; 268 data->temp_min = 0x7bff;
270 data->temp_max = 0x7fff; 269 data->temp_max = 0x7c00;
271 ret = ltc2978_clear_peaks(client, page, data->id); 270 ret = ltc2978_clear_peaks(client, page, data->id);
272 break; 271 break;
273 default: 272 default:
@@ -321,12 +320,13 @@ static int ltc2978_probe(struct i2c_client *client,
321 info = &data->info; 320 info = &data->info;
322 info->write_word_data = ltc2978_write_word_data; 321 info->write_word_data = ltc2978_write_word_data;
323 322
324 data->vout_min[0] = 0xffff;
325 data->vin_min = 0x7bff; 323 data->vin_min = 0x7bff;
324 data->vin_max = 0x7c00;
326 data->temp_min = 0x7bff; 325 data->temp_min = 0x7bff;
327 data->temp_max = 0x7fff; 326 data->temp_max = 0x7c00;
327 data->temp2_max = 0x7c00;
328 328
329 switch (id->driver_data) { 329 switch (data->id) {
330 case ltc2978: 330 case ltc2978:
331 info->read_word_data = ltc2978_read_word_data; 331 info->read_word_data = ltc2978_read_word_data;
332 info->pages = 8; 332 info->pages = 8;
@@ -336,7 +336,6 @@ static int ltc2978_probe(struct i2c_client *client,
336 for (i = 1; i < 8; i++) { 336 for (i = 1; i < 8; i++) {
337 info->func[i] = PMBUS_HAVE_VOUT 337 info->func[i] = PMBUS_HAVE_VOUT
338 | PMBUS_HAVE_STATUS_VOUT; 338 | PMBUS_HAVE_STATUS_VOUT;
339 data->vout_min[i] = 0xffff;
340 } 339 }
341 break; 340 break;
342 case ltc3880: 341 case ltc3880:
@@ -352,11 +351,14 @@ static int ltc2978_probe(struct i2c_client *client,
352 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT 351 | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
353 | PMBUS_HAVE_POUT 352 | PMBUS_HAVE_POUT
354 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP; 353 | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
355 data->vout_min[1] = 0xffff; 354 data->iout_max[0] = 0x7c00;
355 data->iout_max[1] = 0x7c00;
356 break; 356 break;
357 default: 357 default:
358 return -ENODEV; 358 return -ENODEV;
359 } 359 }
360 for (i = 0; i < info->pages; i++)
361 data->vout_min[i] = 0xffff;
360 362
361 return pmbus_do_probe(client, id, info); 363 return pmbus_do_probe(client, id, info);
362} 364}
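
The new sentinel values follow from the PMBus LINEAR11 encoding (5-bit signed exponent in the top bits, 11-bit signed mantissa below): 0x7bff decodes to the largest representable positive value and 0x7c00 to the most negative one, so they are natural starting points for min and max history trackers. A simplified stand-in for the driver's lin11_to_val(), just to show the decode:

#include <stdint.h>
#include <stdio.h>

static long lin11_to_val(uint16_t w)
{
	long exp  = (int16_t)w >> 11;            /* top 5 bits, sign-extended */
	long mant = ((int16_t)(w << 5)) >> 5;    /* low 11 bits, truncate then sign-extend */
	return exp >= 0 ? mant * (1L << exp) : mant / (1L << -exp);
}

int main(void)
{
	printf("0x7bff -> %ld\n", lin11_to_val(0x7bff));  /* largest positive:  1023 * 2^15 */
	printf("0x7c00 -> %ld\n", lin11_to_val(0x7c00));  /* most negative:    -1024 * 2^15 */
	return 0;
}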
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index bfe326e896df..2507f902fb7a 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -965,7 +965,13 @@ static int sht15_probe(struct platform_device *pdev)
965 if (voltage) 965 if (voltage)
966 data->supply_uv = voltage; 966 data->supply_uv = voltage;
967 967
968 regulator_enable(data->reg); 968 ret = regulator_enable(data->reg);
969 if (ret != 0) {
970 dev_err(&pdev->dev,
971 "failed to enable regulator: %d\n", ret);
972 return ret;
973 }
974
969 /* 975 /*
970 * Setup a notifier block to update this if another device 976 * Setup a notifier block to update this if another device
971 * causes the voltage to change 977 * causes the voltage to change
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index a479375a8fd8..e0c404bdc4a8 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -410,6 +410,7 @@ static struct file_system_type ipathfs_fs_type = {
410 .mount = ipathfs_mount, 410 .mount = ipathfs_mount,
411 .kill_sb = ipathfs_kill_super, 411 .kill_sb = ipathfs_kill_super,
412}; 412};
413MODULE_ALIAS_FS("ipathfs");
413 414
414int __init ipath_init_ipathfs(void) 415int __init ipath_init_ipathfs(void)
415{ 416{
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 644bd6f6467c..f247fc6e6182 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -604,6 +604,7 @@ static struct file_system_type qibfs_fs_type = {
604 .mount = qibfs_mount, 604 .mount = qibfs_mount,
605 .kill_sb = qibfs_kill_super, 605 .kill_sb = qibfs_kill_super,
606}; 606};
607MODULE_ALIAS_FS("ipathfs");
607 608
608int __init qib_init_qibfs(void) 609int __init qib_init_qibfs(void)
609{ 610{
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 2fb0d76a04c4..208de7cbb7fa 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -70,8 +70,6 @@
70#define TC3589x_EVT_INT_CLR 0x2 70#define TC3589x_EVT_INT_CLR 0x2
71#define TC3589x_KBD_INT_CLR 0x1 71#define TC3589x_KBD_INT_CLR 0x1
72 72
73#define TC3589x_KBD_KEYMAP_SIZE 64
74
75/** 73/**
76 * struct tc_keypad - data structure used by keypad driver 74 * struct tc_keypad - data structure used by keypad driver
77 * @tc3589x: pointer to tc35893 75 * @tc3589x: pointer to tc35893
@@ -88,7 +86,7 @@ struct tc_keypad {
88 const struct tc3589x_keypad_platform_data *board; 86 const struct tc3589x_keypad_platform_data *board;
89 unsigned int krow; 87 unsigned int krow;
90 unsigned int kcol; 88 unsigned int kcol;
91 unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE]; 89 unsigned short *keymap;
92 bool keypad_stopped; 90 bool keypad_stopped;
93}; 91};
94 92
@@ -338,12 +336,14 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)
338 336
339 error = matrix_keypad_build_keymap(plat->keymap_data, NULL, 337 error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
340 TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL, 338 TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
341 keypad->keymap, input); 339 NULL, input);
342 if (error) { 340 if (error) {
343 dev_err(&pdev->dev, "Failed to build keymap\n"); 341 dev_err(&pdev->dev, "Failed to build keymap\n");
344 goto err_free_mem; 342 goto err_free_mem;
345 } 343 }
346 344
345 keypad->keymap = input->keycode;
346
347 input_set_capability(input, EV_MSC, MSC_SCAN); 347 input_set_capability(input, EV_MSC, MSC_SCAN);
348 if (!plat->no_autorepeat) 348 if (!plat->no_autorepeat)
349 __set_bit(EV_REP, input->evbit); 349 __set_bit(EV_REP, input->evbit);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 7b99fc7c9438..0238e0e14335 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -490,6 +490,29 @@ static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
490 f->y_map |= (p[5] & 0x20) << 6; 490 f->y_map |= (p[5] & 0x20) << 6;
491} 491}
492 492
493static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
494{
495 f->first_mp = !!(p[0] & 0x02);
496 f->is_mp = !!(p[0] & 0x20);
497
498 f->fingers = ((p[0] & 0x6) >> 1 |
499 (p[0] & 0x10) >> 2);
500 f->x_map = ((p[2] & 0x60) >> 5) |
501 ((p[4] & 0x7f) << 2) |
502 ((p[5] & 0x7f) << 9) |
503 ((p[3] & 0x07) << 16) |
504 ((p[3] & 0x70) << 15) |
505 ((p[0] & 0x01) << 22);
506 f->y_map = (p[1] & 0x7f) |
507 ((p[2] & 0x1f) << 7);
508
509 f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
510 f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
511 f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
512
513 alps_decode_buttons_v3(f, p);
514}
515
493static void alps_process_touchpad_packet_v3(struct psmouse *psmouse) 516static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
494{ 517{
495 struct alps_data *priv = psmouse->private; 518 struct alps_data *priv = psmouse->private;
@@ -874,7 +897,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
874 } 897 }
875 898
876 /* Bytes 2 - pktsize should have 0 in the highest bit */ 899 /* Bytes 2 - pktsize should have 0 in the highest bit */
877 if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && 900 if (priv->proto_version != ALPS_PROTO_V5 &&
901 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
878 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { 902 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
879 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", 903 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
880 psmouse->pktcnt - 1, 904 psmouse->pktcnt - 1,
@@ -994,8 +1018,7 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
994 return 0; 1018 return 0;
995} 1019}
996 1020
997static int alps_enter_command_mode(struct psmouse *psmouse, 1021static int alps_enter_command_mode(struct psmouse *psmouse)
998 unsigned char *resp)
999{ 1022{
1000 unsigned char param[4]; 1023 unsigned char param[4];
1001 1024
@@ -1004,14 +1027,12 @@ static int alps_enter_command_mode(struct psmouse *psmouse,
1004 return -1; 1027 return -1;
1005 } 1028 }
1006 1029
1007 if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) { 1030 if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) &&
1031 param[0] != 0x73) {
1008 psmouse_dbg(psmouse, 1032 psmouse_dbg(psmouse,
1009 "unknown response while entering command mode\n"); 1033 "unknown response while entering command mode\n");
1010 return -1; 1034 return -1;
1011 } 1035 }
1012
1013 if (resp)
1014 *resp = param[2];
1015 return 0; 1036 return 0;
1016} 1037}
1017 1038
@@ -1176,7 +1197,7 @@ static int alps_passthrough_mode_v3(struct psmouse *psmouse,
1176{ 1197{
1177 int reg_val, ret = -1; 1198 int reg_val, ret = -1;
1178 1199
1179 if (alps_enter_command_mode(psmouse, NULL)) 1200 if (alps_enter_command_mode(psmouse))
1180 return -1; 1201 return -1;
1181 1202
1182 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008); 1203 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);
@@ -1216,7 +1237,7 @@ static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
1216{ 1237{
1217 int ret = -EIO, reg_val; 1238 int ret = -EIO, reg_val;
1218 1239
1219 if (alps_enter_command_mode(psmouse, NULL)) 1240 if (alps_enter_command_mode(psmouse))
1220 goto error; 1241 goto error;
1221 1242
1222 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08); 1243 reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);
@@ -1279,7 +1300,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
1279 * supported by this driver. If bit 1 isn't set the packet 1300 * supported by this driver. If bit 1 isn't set the packet
1280 * format is different. 1301 * format is different.
1281 */ 1302 */
1282 if (alps_enter_command_mode(psmouse, NULL) || 1303 if (alps_enter_command_mode(psmouse) ||
1283 alps_command_mode_write_reg(psmouse, 1304 alps_command_mode_write_reg(psmouse,
1284 reg_base + 0x08, 0x82) || 1305 reg_base + 0x08, 0x82) ||
1285 alps_exit_command_mode(psmouse)) 1306 alps_exit_command_mode(psmouse))
@@ -1306,7 +1327,7 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
1306 alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO) 1327 alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
1307 goto error; 1328 goto error;
1308 1329
1309 if (alps_enter_command_mode(psmouse, NULL) || 1330 if (alps_enter_command_mode(psmouse) ||
1310 alps_absolute_mode_v3(psmouse)) { 1331 alps_absolute_mode_v3(psmouse)) {
1311 psmouse_err(psmouse, "Failed to enter absolute mode\n"); 1332 psmouse_err(psmouse, "Failed to enter absolute mode\n");
1312 goto error; 1333 goto error;
@@ -1381,7 +1402,7 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
1381 priv->flags &= ~ALPS_DUALPOINT; 1402 priv->flags &= ~ALPS_DUALPOINT;
1382 } 1403 }
1383 1404
1384 if (alps_enter_command_mode(psmouse, NULL) || 1405 if (alps_enter_command_mode(psmouse) ||
1385 alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 || 1406 alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
1386 alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00)) 1407 alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
1387 goto error; 1408 goto error;
@@ -1431,7 +1452,7 @@ static int alps_hw_init_v4(struct psmouse *psmouse)
1431 struct ps2dev *ps2dev = &psmouse->ps2dev; 1452 struct ps2dev *ps2dev = &psmouse->ps2dev;
1432 unsigned char param[4]; 1453 unsigned char param[4];
1433 1454
1434 if (alps_enter_command_mode(psmouse, NULL)) 1455 if (alps_enter_command_mode(psmouse))
1435 goto error; 1456 goto error;
1436 1457
1437 if (alps_absolute_mode_v4(psmouse)) { 1458 if (alps_absolute_mode_v4(psmouse)) {
@@ -1499,6 +1520,23 @@ error:
1499 return -1; 1520 return -1;
1500} 1521}
1501 1522
1523static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
1524{
1525 struct ps2dev *ps2dev = &psmouse->ps2dev;
1526 unsigned char param[2];
1527
1528 /* This is dolphin "v1" as empirically defined by florin9doi */
1529 param[0] = 0x64;
1530 param[1] = 0x28;
1531
1532 if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
1533 ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
1534 ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
1535 return -1;
1536
1537 return 0;
1538}
1539
1502static void alps_set_defaults(struct alps_data *priv) 1540static void alps_set_defaults(struct alps_data *priv)
1503{ 1541{
1504 priv->byte0 = 0x8f; 1542 priv->byte0 = 0x8f;
@@ -1532,6 +1570,21 @@ static void alps_set_defaults(struct alps_data *priv)
1532 priv->nibble_commands = alps_v4_nibble_commands; 1570 priv->nibble_commands = alps_v4_nibble_commands;
1533 priv->addr_command = PSMOUSE_CMD_DISABLE; 1571 priv->addr_command = PSMOUSE_CMD_DISABLE;
1534 break; 1572 break;
1573 case ALPS_PROTO_V5:
1574 priv->hw_init = alps_hw_init_dolphin_v1;
1575 priv->process_packet = alps_process_packet_v3;
1576 priv->decode_fields = alps_decode_dolphin;
1577 priv->set_abs_params = alps_set_abs_params_mt;
1578 priv->nibble_commands = alps_v3_nibble_commands;
1579 priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
1580 priv->byte0 = 0xc8;
1581 priv->mask0 = 0xc8;
1582 priv->flags = 0;
1583 priv->x_max = 1360;
1584 priv->y_max = 660;
1585 priv->x_bits = 23;
1586 priv->y_bits = 12;
1587 break;
1535 } 1588 }
1536} 1589}
1537 1590
@@ -1592,6 +1645,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
1592 1645
1593 if (alps_match_table(psmouse, priv, e7, ec) == 0) { 1646 if (alps_match_table(psmouse, priv, e7, ec) == 0) {
1594 return 0; 1647 return 0;
1648 } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
1649 ec[0] == 0x73 && ec[1] == 0x01) {
1650 priv->proto_version = ALPS_PROTO_V5;
1651 alps_set_defaults(priv);
1652
1653 return 0;
1595 } else if (ec[0] == 0x88 && ec[1] == 0x08) { 1654 } else if (ec[0] == 0x88 && ec[1] == 0x08) {
1596 priv->proto_version = ALPS_PROTO_V3; 1655 priv->proto_version = ALPS_PROTO_V3;
1597 alps_set_defaults(priv); 1656 alps_set_defaults(priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 970480551b6e..eee59853b9ce 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -16,6 +16,7 @@
16#define ALPS_PROTO_V2 2 16#define ALPS_PROTO_V2 2
17#define ALPS_PROTO_V3 3 17#define ALPS_PROTO_V3 3
18#define ALPS_PROTO_V4 4 18#define ALPS_PROTO_V4 4
19#define ALPS_PROTO_V5 5
19 20
20/** 21/**
21 * struct alps_model_info - touchpad ID table 22 * struct alps_model_info - touchpad ID table
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 1673dc6c8092..f51765fff054 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -236,6 +236,13 @@ static int cypress_read_fw_version(struct psmouse *psmouse)
236 cytp->fw_version = param[2] & FW_VERSION_MASX; 236 cytp->fw_version = param[2] & FW_VERSION_MASX;
237 cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0; 237 cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;
238 238
239 /*
240 * Trackpad fw_version 11 (in Dell XPS12) yields a bogus response to
241 * CYTP_CMD_READ_TP_METRICS so do not try to use it. LP: #1103594.
242 */
243 if (cytp->fw_version >= 11)
244 cytp->tp_metrics_supported = 0;
245
239 psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version); 246 psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
240 psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n", 247 psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
241 cytp->tp_metrics_supported); 248 cytp->tp_metrics_supported);
@@ -258,6 +265,9 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
258 cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width; 265 cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
259 cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high; 266 cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;
260 267
268 if (!cytp->tp_metrics_supported)
269 return 0;
270
261 memset(param, 0, sizeof(param)); 271 memset(param, 0, sizeof(param));
262 if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) { 272 if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
263 /* Update trackpad parameters. */ 273 /* Update trackpad parameters. */
@@ -315,18 +325,15 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
315 325
316static int cypress_query_hardware(struct psmouse *psmouse) 326static int cypress_query_hardware(struct psmouse *psmouse)
317{ 327{
318 struct cytp_data *cytp = psmouse->private;
319 int ret; 328 int ret;
320 329
321 ret = cypress_read_fw_version(psmouse); 330 ret = cypress_read_fw_version(psmouse);
322 if (ret) 331 if (ret)
323 return ret; 332 return ret;
324 333
325 if (cytp->tp_metrics_supported) { 334 ret = cypress_read_tp_metrics(psmouse);
326 ret = cypress_read_tp_metrics(psmouse); 335 if (ret)
327 if (ret) 336 return ret;
328 return ret;
329 }
330 337
331 return 0; 338 return 0;
332} 339}
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 41b6fbf60112..1daa97913b7d 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2017,6 +2017,9 @@ static const struct wacom_features wacom_features_0x100 =
2017static const struct wacom_features wacom_features_0x101 = 2017static const struct wacom_features wacom_features_0x101 =
2018 { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2018 { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2019 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2019 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2020static const struct wacom_features wacom_features_0x10D =
2021 { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2022 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2020static const struct wacom_features wacom_features_0x4001 = 2023static const struct wacom_features wacom_features_0x4001 =
2021 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255, 2024 { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
2022 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2025 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2201,6 +2204,7 @@ const struct usb_device_id wacom_ids[] = {
2201 { USB_DEVICE_WACOM(0xEF) }, 2204 { USB_DEVICE_WACOM(0xEF) },
2202 { USB_DEVICE_WACOM(0x100) }, 2205 { USB_DEVICE_WACOM(0x100) },
2203 { USB_DEVICE_WACOM(0x101) }, 2206 { USB_DEVICE_WACOM(0x101) },
2207 { USB_DEVICE_WACOM(0x10D) },
2204 { USB_DEVICE_WACOM(0x4001) }, 2208 { USB_DEVICE_WACOM(0x4001) },
2205 { USB_DEVICE_WACOM(0x47) }, 2209 { USB_DEVICE_WACOM(0x47) },
2206 { USB_DEVICE_WACOM(0xF4) }, 2210 { USB_DEVICE_WACOM(0xF4) },
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 4f702b3ec1a3..434c3df250ca 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -236,7 +236,12 @@ static void __ads7846_disable(struct ads7846 *ts)
236/* Must be called with ts->lock held */ 236/* Must be called with ts->lock held */
237static void __ads7846_enable(struct ads7846 *ts) 237static void __ads7846_enable(struct ads7846 *ts)
238{ 238{
239 regulator_enable(ts->reg); 239 int error;
240
241 error = regulator_enable(ts->reg);
242 if (error != 0)
243 dev_err(&ts->spi->dev, "Failed to enable supply: %d\n", error);
244
240 ads7846_restart(ts); 245 ads7846_restart(ts);
241} 246}
242 247
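
regulator_enable() can fail (for example when the supply cannot be brought up), so the hunk above stops ignoring its return value. A minimal kernel-style sketch of the same pattern, assuming a hypothetical my_ts structure that holds the regulator and the device used for logging:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

struct my_ts {
	struct device *dev;
	struct regulator *reg;
};

/* Void context: the failure cannot be propagated, so it is at least logged. */
static void my_ts_enable(struct my_ts *ts)
{
	int error = regulator_enable(ts->reg);

	if (error)
		dev_err(ts->dev, "failed to enable supply: %d\n", error);
}
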
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index d04f810cb1dd..59aa24002c7b 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -176,11 +176,17 @@
176/* Define for MXT_GEN_COMMAND_T6 */ 176/* Define for MXT_GEN_COMMAND_T6 */
177#define MXT_BOOT_VALUE 0xa5 177#define MXT_BOOT_VALUE 0xa5
178#define MXT_BACKUP_VALUE 0x55 178#define MXT_BACKUP_VALUE 0x55
179#define MXT_BACKUP_TIME 25 /* msec */ 179#define MXT_BACKUP_TIME 50 /* msec */
180#define MXT_RESET_TIME 65 /* msec */ 180#define MXT_RESET_TIME 200 /* msec */
181 181
182#define MXT_FWRESET_TIME 175 /* msec */ 182#define MXT_FWRESET_TIME 175 /* msec */
183 183
184/* MXT_SPT_GPIOPWM_T19 field */
185#define MXT_GPIO0_MASK 0x04
186#define MXT_GPIO1_MASK 0x08
187#define MXT_GPIO2_MASK 0x10
188#define MXT_GPIO3_MASK 0x20
189
184/* Command to unlock bootloader */ 190/* Command to unlock bootloader */
185#define MXT_UNLOCK_CMD_MSB 0xaa 191#define MXT_UNLOCK_CMD_MSB 0xaa
186#define MXT_UNLOCK_CMD_LSB 0xdc 192#define MXT_UNLOCK_CMD_LSB 0xdc
@@ -212,6 +218,8 @@
212/* Touchscreen absolute values */ 218/* Touchscreen absolute values */
213#define MXT_MAX_AREA 0xff 219#define MXT_MAX_AREA 0xff
214 220
221#define MXT_PIXELS_PER_MM 20
222
215struct mxt_info { 223struct mxt_info {
216 u8 family_id; 224 u8 family_id;
217 u8 variant_id; 225 u8 variant_id;
@@ -243,6 +251,8 @@ struct mxt_data {
243 const struct mxt_platform_data *pdata; 251 const struct mxt_platform_data *pdata;
244 struct mxt_object *object_table; 252 struct mxt_object *object_table;
245 struct mxt_info info; 253 struct mxt_info info;
254 bool is_tp;
255
246 unsigned int irq; 256 unsigned int irq;
247 unsigned int max_x; 257 unsigned int max_x;
248 unsigned int max_y; 258 unsigned int max_y;
@@ -251,6 +261,7 @@ struct mxt_data {
251 u8 T6_reportid; 261 u8 T6_reportid;
252 u8 T9_reportid_min; 262 u8 T9_reportid_min;
253 u8 T9_reportid_max; 263 u8 T9_reportid_max;
264 u8 T19_reportid;
254}; 265};
255 266
256static bool mxt_object_readable(unsigned int type) 267static bool mxt_object_readable(unsigned int type)
@@ -502,6 +513,21 @@ static int mxt_write_object(struct mxt_data *data,
502 return mxt_write_reg(data->client, reg + offset, val); 513 return mxt_write_reg(data->client, reg + offset, val);
503} 514}
504 515
516static void mxt_input_button(struct mxt_data *data, struct mxt_message *message)
517{
518 struct input_dev *input = data->input_dev;
519 bool button;
520 int i;
521
522 /* Active-low switch */
523 for (i = 0; i < MXT_NUM_GPIO; i++) {
524 if (data->pdata->key_map[i] == KEY_RESERVED)
525 continue;
526 button = !(message->message[0] & MXT_GPIO0_MASK << i);
527 input_report_key(input, data->pdata->key_map[i], button);
528 }
529}
530
505static void mxt_input_touchevent(struct mxt_data *data, 531static void mxt_input_touchevent(struct mxt_data *data,
506 struct mxt_message *message, int id) 532 struct mxt_message *message, int id)
507{ 533{
@@ -585,6 +611,9 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
585 int id = reportid - data->T9_reportid_min; 611 int id = reportid - data->T9_reportid_min;
586 mxt_input_touchevent(data, &message, id); 612 mxt_input_touchevent(data, &message, id);
587 update_input = true; 613 update_input = true;
614 } else if (message.reportid == data->T19_reportid) {
615 mxt_input_button(data, &message);
616 update_input = true;
588 } else { 617 } else {
589 mxt_dump_message(dev, &message); 618 mxt_dump_message(dev, &message);
590 } 619 }
@@ -764,6 +793,9 @@ static int mxt_get_object_table(struct mxt_data *data)
764 data->T9_reportid_min = min_id; 793 data->T9_reportid_min = min_id;
765 data->T9_reportid_max = max_id; 794 data->T9_reportid_max = max_id;
766 break; 795 break;
796 case MXT_SPT_GPIOPWM_T19:
797 data->T19_reportid = min_id;
798 break;
767 } 799 }
768 } 800 }
769 801
@@ -777,7 +809,7 @@ static void mxt_free_object_table(struct mxt_data *data)
777 data->T6_reportid = 0; 809 data->T6_reportid = 0;
778 data->T9_reportid_min = 0; 810 data->T9_reportid_min = 0;
779 data->T9_reportid_max = 0; 811 data->T9_reportid_max = 0;
780 812 data->T19_reportid = 0;
781} 813}
782 814
783static int mxt_initialize(struct mxt_data *data) 815static int mxt_initialize(struct mxt_data *data)
@@ -1115,9 +1147,13 @@ static int mxt_probe(struct i2c_client *client,
1115 goto err_free_mem; 1147 goto err_free_mem;
1116 } 1148 }
1117 1149
1118 input_dev->name = "Atmel maXTouch Touchscreen"; 1150 data->is_tp = pdata && pdata->is_tp;
1151
1152 input_dev->name = (data->is_tp) ? "Atmel maXTouch Touchpad" :
1153 "Atmel maXTouch Touchscreen";
1119 snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", 1154 snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
1120 client->adapter->nr, client->addr); 1155 client->adapter->nr, client->addr);
1156
1121 input_dev->phys = data->phys; 1157 input_dev->phys = data->phys;
1122 1158
1123 input_dev->id.bustype = BUS_I2C; 1159 input_dev->id.bustype = BUS_I2C;
@@ -1140,6 +1176,29 @@ static int mxt_probe(struct i2c_client *client,
1140 __set_bit(EV_KEY, input_dev->evbit); 1176 __set_bit(EV_KEY, input_dev->evbit);
1141 __set_bit(BTN_TOUCH, input_dev->keybit); 1177 __set_bit(BTN_TOUCH, input_dev->keybit);
1142 1178
1179 if (data->is_tp) {
1180 int i;
1181 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1182 __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
1183
1184 for (i = 0; i < MXT_NUM_GPIO; i++)
1185 if (pdata->key_map[i] != KEY_RESERVED)
1186 __set_bit(pdata->key_map[i], input_dev->keybit);
1187
1188 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1189 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1190 __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
1191 __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
1192 __set_bit(BTN_TOOL_QUINTTAP, input_dev->keybit);
1193
1194 input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
1195 input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
1196 input_abs_set_res(input_dev, ABS_MT_POSITION_X,
1197 MXT_PIXELS_PER_MM);
1198 input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
1199 MXT_PIXELS_PER_MM);
1200 }
1201
1143 /* For single touch */ 1202 /* For single touch */
1144 input_set_abs_params(input_dev, ABS_X, 1203 input_set_abs_params(input_dev, ABS_X,
1145 0, data->max_x, 0, 0); 1204 0, data->max_x, 0, 0);
@@ -1258,6 +1317,7 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
1258static const struct i2c_device_id mxt_id[] = { 1317static const struct i2c_device_id mxt_id[] = {
1259 { "qt602240_ts", 0 }, 1318 { "qt602240_ts", 0 },
1260 { "atmel_mxt_ts", 0 }, 1319 { "atmel_mxt_ts", 0 },
1320 { "atmel_mxt_tp", 0 },
1261 { "mXT224", 0 }, 1321 { "mXT224", 0 },
1262 { } 1322 { }
1263}; 1323};
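
The T19 handler added above decodes up to four active-low button bits from the first message byte by shifting the base mask (0x04) left by the button index. A small self-contained example of that decode, reusing the same mask value but with the key mapping left out:

#include <stdio.h>
#include <stdbool.h>

#define GPIO0_MASK 0x04
#define NUM_GPIO   4

int main(void)
{
	unsigned char status = 0x14;	/* example T19 message byte */
	int i;

	for (i = 0; i < NUM_GPIO; i++) {
		/* Active-low: a cleared bit means the button is pressed. */
		bool pressed = !(status & (GPIO0_MASK << i));

		printf("button %d: %s\n", i, pressed ? "pressed" : "released");
	}
	return 0;
}
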
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 4a29ddf6bf1e..1443532fe6c4 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -314,15 +314,27 @@ static int mms114_start(struct mms114_data *data)
314 struct i2c_client *client = data->client; 314 struct i2c_client *client = data->client;
315 int error; 315 int error;
316 316
317 if (data->core_reg) 317 error = regulator_enable(data->core_reg);
318 regulator_enable(data->core_reg); 318 if (error) {
319 if (data->io_reg) 319 dev_err(&client->dev, "Failed to enable avdd: %d\n", error);
320 regulator_enable(data->io_reg); 320 return error;
321 }
322
323 error = regulator_enable(data->io_reg);
324 if (error) {
325 dev_err(&client->dev, "Failed to enable vdd: %d\n", error);
326 regulator_disable(data->core_reg);
327 return error;
328 }
329
321 mdelay(MMS114_POWERON_DELAY); 330 mdelay(MMS114_POWERON_DELAY);
322 331
323 error = mms114_setup_regs(data); 332 error = mms114_setup_regs(data);
324 if (error < 0) 333 if (error < 0) {
334 regulator_disable(data->io_reg);
335 regulator_disable(data->core_reg);
325 return error; 336 return error;
337 }
326 338
327 if (data->pdata->cfg_pin) 339 if (data->pdata->cfg_pin)
328 data->pdata->cfg_pin(true); 340 data->pdata->cfg_pin(true);
@@ -335,16 +347,20 @@ static int mms114_start(struct mms114_data *data)
335static void mms114_stop(struct mms114_data *data) 347static void mms114_stop(struct mms114_data *data)
336{ 348{
337 struct i2c_client *client = data->client; 349 struct i2c_client *client = data->client;
350 int error;
338 351
339 disable_irq(client->irq); 352 disable_irq(client->irq);
340 353
341 if (data->pdata->cfg_pin) 354 if (data->pdata->cfg_pin)
342 data->pdata->cfg_pin(false); 355 data->pdata->cfg_pin(false);
343 356
344 if (data->io_reg) 357 error = regulator_disable(data->io_reg);
345 regulator_disable(data->io_reg); 358 if (error)
346 if (data->core_reg) 359 dev_warn(&client->dev, "Failed to disable vdd: %d\n", error);
347 regulator_disable(data->core_reg); 360
361 error = regulator_disable(data->core_reg);
362 if (error)
363 dev_warn(&client->dev, "Failed to disable avdd: %d\n", error);
348} 364}
349 365
350static int mms114_input_open(struct input_dev *dev) 366static int mms114_input_open(struct input_dev *dev)
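
The start/stop paths above enable the core supply first, then the I/O supply, and unwind in reverse order when a later step fails. A kernel-style sketch of that enable-with-rollback ordering, with hypothetical field names (core_reg, io_reg):

#include <linux/regulator/consumer.h>

struct my_ts {
	struct regulator *core_reg;	/* analog supply */
	struct regulator *io_reg;	/* digital I/O supply */
};

static int my_ts_power_on(struct my_ts *ts)
{
	int error;

	error = regulator_enable(ts->core_reg);
	if (error)
		return error;

	error = regulator_enable(ts->io_reg);
	if (error) {
		/* Roll back the supply that did come up. */
		regulator_disable(ts->core_reg);
		return error;
	}

	return 0;
}
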
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index dc7e478b7e5f..e5cdaf87822c 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1083,6 +1083,7 @@ static const char *dma_remap_fault_reasons[] =
1083 "non-zero reserved fields in RTP", 1083 "non-zero reserved fields in RTP",
1084 "non-zero reserved fields in CTP", 1084 "non-zero reserved fields in CTP",
1085 "non-zero reserved fields in PTE", 1085 "non-zero reserved fields in PTE",
1086 "PCE for translation request specifies blocking",
1086}; 1087};
1087 1088
1088static const char *irq_remap_fault_reasons[] = 1089static const char *irq_remap_fault_reasons[] =
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index d8a7d8323414..ebaebdf30f98 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -902,7 +902,9 @@ isdn_tty_send_msg(modem_info *info, atemu *m, char *msg)
902 int j; 902 int j;
903 int l; 903 int l;
904 904
905 l = strlen(msg); 905 l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg)
906 + sizeof(cmd.parm.cmsg.para) - 2);
907
906 if (!l) { 908 if (!l) {
907 isdn_tty_modem_result(RESULT_ERROR, info); 909 isdn_tty_modem_result(RESULT_ERROR, info);
908 return; 910 return;
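
The change above bounds the message length to the space actually available in the command's parameter buffer instead of trusting strlen() alone. A self-contained example of the same clamp applied to a simpler fixed-size buffer (the buffer name and size are hypothetical):

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	char para[16];			/* stand-in for the command's parameter area */
	const char *msg = "a message that is longer than the buffer";
	/* Reserve one byte so the copy can always be NUL-terminated. */
	size_t len = MIN(strlen(msg), sizeof(para) - 1);

	memcpy(para, msg, len);
	para[len] = '\0';
	printf("copied %zu bytes: \"%s\"\n", len, para);
	return 0;
}
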
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index c45b3aedafba..d873cbae2fbb 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -138,8 +138,7 @@ int pl320_ipc_unregister_notifier(struct notifier_block *nb)
138} 138}
139EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier); 139EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
140 140
141static int __init pl320_probe(struct amba_device *adev, 141static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
142 const struct amba_id *id)
143{ 142{
144 int ret; 143 int ret;
145 144
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 6673e578b3e9..ce5b75616b45 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -110,6 +110,7 @@ static struct file_system_type ibmasmfs_type = {
110 .mount = ibmasmfs_mount, 110 .mount = ibmasmfs_mount,
111 .kill_sb = kill_litter_super, 111 .kill_sb = kill_litter_super,
112}; 112};
113MODULE_ALIAS_FS("ibmasmfs");
113 114
114static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) 115static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
115{ 116{
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 82c06165d3d2..92ab30ab00dc 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1238,6 +1238,7 @@ static struct file_system_type mtd_inodefs_type = {
1238 .mount = mtd_inodefs_mount, 1238 .mount = mtd_inodefs_mount,
1239 .kill_sb = kill_anon_super, 1239 .kill_sb = kill_anon_super,
1240}; 1240};
1241MODULE_ALIAS_FS("mtd_inodefs");
1241 1242
1242static int __init init_mtdchar(void) 1243static int __init init_mtdchar(void)
1243{ 1244{
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7bd068a6056a..8b4e96e01d6c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev,
1964 } 1964 }
1965 1965
1966 block_netpoll_tx(); 1966 block_netpoll_tx();
1967 call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
1968 write_lock_bh(&bond->lock); 1967 write_lock_bh(&bond->lock);
1969 1968
1970 slave = bond_get_slave_by_dev(bond, slave_dev); 1969 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev,
2066 write_unlock_bh(&bond->lock); 2065 write_unlock_bh(&bond->lock);
2067 unblock_netpoll_tx(); 2066 unblock_netpoll_tx();
2068 2067
2069 if (bond->slave_cnt == 0) 2068 if (bond->slave_cnt == 0) {
2070 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); 2069 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2070 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2071 }
2071 2072
2072 bond_compute_features(bond); 2073 bond_compute_features(bond);
2073 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) && 2074 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
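
The bonding hunk above moves the NETDEV_RELEASE notification so it is sent only once, when the last slave leaves, next to the address-change notification. A tiny self-contained sketch of that "notify on last removal" shape, with hypothetical names standing in for the bond state and the notifier call:

#include <stdio.h>

static int slave_cnt = 2;	/* hypothetical bond state */

static void notify_release(void)
{
	printf("NETDEV_RELEASE sent\n");	/* stand-in for the notifier call */
}

static void release_one_slave(void)
{
	slave_cnt--;
	if (slave_cnt == 0)	/* only the final removal triggers the event */
		notify_release();
}

int main(void)
{
	release_one_slave();	/* no notification yet */
	release_one_slave();	/* last slave gone: notification fires */
	return 0;
}
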
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 31c5787970db..77ebae0ac64a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8647 MDIO_WC_DEVAD, 8647 MDIO_WC_DEVAD,
8648 MDIO_WC_REG_DIGITAL5_MISC6, 8648 MDIO_WC_REG_DIGITAL5_MISC6,
8649 &rx_tx_in_reset); 8649 &rx_tx_in_reset);
8650 if (!rx_tx_in_reset) { 8650 if ((!rx_tx_in_reset) &&
8651 (params->link_flags &
8652 PHY_INITIALIZED)) {
8651 bnx2x_warpcore_reset_lane(bp, phy, 1); 8653 bnx2x_warpcore_reset_lane(bp, phy, 1);
8652 bnx2x_warpcore_config_sfi(phy, params); 8654 bnx2x_warpcore_config_sfi(phy, params);
8653 bnx2x_warpcore_reset_lane(bp, phy, 0); 8655 bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12527 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 12529 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12528 vars->mac_type = MAC_TYPE_NONE; 12530 vars->mac_type = MAC_TYPE_NONE;
12529 vars->phy_flags = 0; 12531 vars->phy_flags = 0;
12532 vars->check_kr2_recovery_cnt = 0;
12533 params->link_flags = PHY_INITIALIZED;
12530 /* Driver opens NIG-BRB filters */ 12534 /* Driver opens NIG-BRB filters */
12531 bnx2x_set_rx_filter(params, 1); 12535 bnx2x_set_rx_filter(params, 1);
12532 /* Check if link flap can be avoided */ 12536 /* Check if link flap can be avoided */
@@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
12691 struct bnx2x *bp = params->bp; 12695 struct bnx2x *bp = params->bp;
12692 vars->link_up = 0; 12696 vars->link_up = 0;
12693 vars->phy_flags = 0; 12697 vars->phy_flags = 0;
12698 params->link_flags &= ~PHY_INITIALIZED;
12694 if (!params->lfa_base) 12699 if (!params->lfa_base)
12695 return bnx2x_link_reset(params, vars, 1); 12700 return bnx2x_link_reset(params, vars, 1);
12696 /* 12701 /*
@@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
13411 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; 13416 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13412 bnx2x_update_link_attr(params, vars->link_attr_sync); 13417 bnx2x_update_link_attr(params, vars->link_attr_sync);
13413 13418
13419 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
13414 /* Restart AN on leading lane */ 13420 /* Restart AN on leading lane */
13415 bnx2x_warpcore_restart_AN_KR(phy, params); 13421 bnx2x_warpcore_restart_AN_KR(phy, params);
13416} 13422}
@@ -13439,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13439 return; 13445 return;
13440 } 13446 }
13441 13447
13448 /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
13449 * since some switches tend to reinit the AN process and clear the
13450 * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
13451 * and recovered many times
13452 */
13453 if (vars->check_kr2_recovery_cnt > 0) {
13454 vars->check_kr2_recovery_cnt--;
13455 return;
13456 }
13442 lane = bnx2x_get_warpcore_lane(phy, params); 13457 lane = bnx2x_get_warpcore_lane(phy, params);
13443 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 13458 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
13444 MDIO_AER_BLOCK_AER_REG, lane); 13459 MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index be5c195d03dd..56c2aae4e2c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -309,6 +309,7 @@ struct link_params {
309 req_flow_ctrl is set to AUTO */ 309 req_flow_ctrl is set to AUTO */
310 u16 link_flags; 310 u16 link_flags;
311#define LINK_FLAGS_INT_DISABLED (1<<0) 311#define LINK_FLAGS_INT_DISABLED (1<<0)
312#define PHY_INITIALIZED (1<<1)
312 u32 lfa_base; 313 u32 lfa_base;
313}; 314};
314 315
@@ -342,7 +343,8 @@ struct link_vars {
342 u32 link_status; 343 u32 link_status;
343 u32 eee_status; 344 u32 eee_status;
344 u8 fault_detected; 345 u8 fault_detected;
345 u8 rsrv1; 346 u8 check_kr2_recovery_cnt;
347#define CHECK_KR2_RECOVERY_CNT 5
346 u16 periodic_flags; 348 u16 periodic_flags;
347#define PERIODIC_FLAGS_LINK_EVENT 0x0001 349#define PERIODIC_FLAGS_LINK_EVENT 0x0001
348 350
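
The new check_kr2_recovery_cnt field above acts as a debounce: after KR2 is disabled the counter is loaded with CHECK_KR2_RECOVERY_CNT, and the periodic work-around simply decrements and returns until it reaches zero, which the in-tree comment sizes at about five seconds of grace. A self-contained sketch of that countdown (tick numbering is illustrative only):

#include <stdio.h>

#define RECOVERY_CNT 5		/* mirrors CHECK_KR2_RECOVERY_CNT */

static int recovery_cnt;

static void disable_feature(void)
{
	recovery_cnt = RECOVERY_CNT;	/* start the grace period */
}

static void periodic_check(int tick)
{
	if (recovery_cnt > 0) {
		recovery_cnt--;		/* still in the grace period */
		return;
	}
	printf("tick %d: recovery check runs\n", tick);
}

int main(void)
{
	int t;

	disable_feature();
	for (t = 0; t < 8; t++)
		periodic_check(t);	/* the first 5 ticks are skipped */
	return 0;
}
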
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2b2bee61ddd7..0c1a2ef163a5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1870,6 +1870,8 @@ static void tg3_link_report(struct tg3 *tp)
1870 1870
1871 tg3_ump_link_report(tp); 1871 tg3_ump_link_report(tp);
1872 } 1872 }
1873
1874 tp->link_up = netif_carrier_ok(tp->dev);
1873} 1875}
1874 1876
1875static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl) 1877static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
@@ -2523,12 +2525,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2523 return err; 2525 return err;
2524} 2526}
2525 2527
2526static void tg3_carrier_on(struct tg3 *tp)
2527{
2528 netif_carrier_on(tp->dev);
2529 tp->link_up = true;
2530}
2531
2532static void tg3_carrier_off(struct tg3 *tp) 2528static void tg3_carrier_off(struct tg3 *tp)
2533{ 2529{
2534 netif_carrier_off(tp->dev); 2530 netif_carrier_off(tp->dev);
@@ -2554,7 +2550,7 @@ static int tg3_phy_reset(struct tg3 *tp)
2554 return -EBUSY; 2550 return -EBUSY;
2555 2551
2556 if (netif_running(tp->dev) && tp->link_up) { 2552 if (netif_running(tp->dev) && tp->link_up) {
2557 tg3_carrier_off(tp); 2553 netif_carrier_off(tp->dev);
2558 tg3_link_report(tp); 2554 tg3_link_report(tp);
2559 } 2555 }
2560 2556
@@ -4403,9 +4399,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4403{ 4399{
4404 if (curr_link_up != tp->link_up) { 4400 if (curr_link_up != tp->link_up) {
4405 if (curr_link_up) { 4401 if (curr_link_up) {
4406 tg3_carrier_on(tp); 4402 netif_carrier_on(tp->dev);
4407 } else { 4403 } else {
4408 tg3_carrier_off(tp); 4404 netif_carrier_off(tp->dev);
4409 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) 4405 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4410 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; 4406 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4411 } 4407 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4ce62031f62f..8049268ce0f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
497} 497}
498 498
499#define EEPROM_STAT_ADDR 0x7bfc 499#define EEPROM_STAT_ADDR 0x7bfc
500#define VPD_BASE 0
501#define VPD_LEN 512 500#define VPD_LEN 512
501#define VPD_BASE 0x400
502#define VPD_BASE_OLD 0
502 503
503/** 504/**
504 * t4_seeprom_wp - enable/disable EEPROM write protection 505 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
524int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 525int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
525{ 526{
526 u32 cclk_param, cclk_val; 527 u32 cclk_param, cclk_val;
527 int i, ret; 528 int i, ret, addr;
528 int ec, sn; 529 int ec, sn;
529 u8 *vpd, csum; 530 u8 *vpd, csum;
530 unsigned int vpdr_len, kw_offset, id_len; 531 unsigned int vpdr_len, kw_offset, id_len;
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
533 if (!vpd) 534 if (!vpd)
534 return -ENOMEM; 535 return -ENOMEM;
535 536
536 ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); 537 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
538 if (ret < 0)
539 goto out;
540 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
541
542 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
537 if (ret < 0) 543 if (ret < 0)
538 goto out; 544 goto out;
539 545
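
The cxgb4 change above first reads a byte at the new VPD offset and falls back to the old base unless the data starts with 0x82, the PCI VPD "identifier string" large-resource tag. A minimal kernel-style sketch of that probe, assuming pdev is an already-enabled PCI device and using hypothetical MY_ constants:

#include <linux/pci.h>

#define MY_VPD_BASE	0x400	/* preferred location on newer parts */
#define MY_VPD_BASE_OLD	0x0	/* legacy location */

/* Returns the offset at which a well-formed VPD image starts, or a
 * negative errno if the read itself failed. */
static int my_find_vpd_base(struct pci_dev *pdev)
{
	u8 tag;
	int ret;

	ret = pci_read_vpd(pdev, MY_VPD_BASE, sizeof(tag), &tag);
	if (ret < 0)
		return ret;

	/* 0x82 is the large-resource "ID string" tag that opens real VPD. */
	return (tag == 0x82) ? MY_VPD_BASE : MY_VPD_BASE_OLD;
}
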
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index ff1efe55ceee..2e2700e3a5ab 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -349,6 +349,7 @@ struct be_adapter {
349 struct pci_dev *pdev; 349 struct pci_dev *pdev;
350 struct net_device *netdev; 350 struct net_device *netdev;
351 351
352 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
352 u8 __iomem *db; /* Door Bell */ 353 u8 __iomem *db; /* Door Bell */
353 354
354 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 355 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 4512e42596d4..6ed46396bb36 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
473 return 0; 473 return 0;
474} 474}
475 475
476static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 476static u16 be_POST_stage_get(struct be_adapter *adapter)
477{ 477{
478 u32 sem; 478 u32 sem;
479 u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
480 SLIPORT_SEMAPHORE_OFFSET_BE;
481 479
482 pci_read_config_dword(adapter->pdev, reg, &sem); 480 if (BEx_chip(adapter))
483 *stage = sem & POST_STAGE_MASK; 481 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
484
485 if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
486 return -1;
487 else 482 else
488 return 0; 483 pci_read_config_dword(adapter->pdev,
484 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
485
486 return sem & POST_STAGE_MASK;
489} 487}
490 488
491int lancer_wait_ready(struct be_adapter *adapter) 489int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
579 } 577 }
580 578
581 do { 579 do {
582 status = be_POST_stage_get(adapter, &stage); 580 stage = be_POST_stage_get(adapter);
583 if (status) { 581 if (stage == POST_STAGE_ARMFW_RDY)
584 dev_err(dev, "POST error; stage=0x%x\n", stage);
585 return -1;
586 } else if (stage != POST_STAGE_ARMFW_RDY) {
587 if (msleep_interruptible(2000)) {
588 dev_err(dev, "Waiting for POST aborted\n");
589 return -EINTR;
590 }
591 timeout += 2;
592 } else {
593 return 0; 582 return 0;
583
584 dev_info(dev, "Waiting for POST, %ds elapsed\n",
585 timeout);
586 if (msleep_interruptible(2000)) {
587 dev_err(dev, "Waiting for POST aborted\n");
588 return -EINTR;
594 } 589 }
590 timeout += 2;
595 } while (timeout < 60); 591 } while (timeout < 60);
596 592
597 dev_err(dev, "POST timeout; stage=0x%x\n", stage); 593 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index c515eeaaa5d6..89e6d8cfaf0d 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,8 +32,8 @@
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semphore: used for SH & BE *************/ 34/********** MPU semphore: used for SH & BE *************/
35#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c 35#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
37#define POST_STAGE_MASK 0x0000FFFF 37#define POST_STAGE_MASK 0x0000FFFF
38#define POST_ERR_MASK 0x1 38#define POST_ERR_MASK 0x1
39#define POST_ERR_SHIFT 31 39#define POST_ERR_SHIFT 31
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1f8103c0afbf..b8e5019398f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
3688 3688
3689static void be_unmap_pci_bars(struct be_adapter *adapter) 3689static void be_unmap_pci_bars(struct be_adapter *adapter)
3690{ 3690{
3691 if (adapter->csr)
3692 pci_iounmap(adapter->pdev, adapter->csr);
3691 if (adapter->db) 3693 if (adapter->db)
3692 pci_iounmap(adapter->pdev, adapter->db); 3694 pci_iounmap(adapter->pdev, adapter->db);
3693} 3695}
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
3721 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> 3723 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3722 SLI_INTF_IF_TYPE_SHIFT; 3724 SLI_INTF_IF_TYPE_SHIFT;
3723 3725
3726 if (BEx_chip(adapter) && be_physfn(adapter)) {
3727 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3728 if (adapter->csr == NULL)
3729 return -ENOMEM;
3730 }
3731
3724 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 3732 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3725 if (addr == NULL) 3733 if (addr == NULL)
3726 goto pci_map_err; 3734 goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4329 pci_restore_state(pdev); 4337 pci_restore_state(pdev);
4330 4338
4331 /* Check if card is ok and fw is ready */ 4339 /* Check if card is ok and fw is ready */
4340 dev_info(&adapter->pdev->dev,
4341 "Waiting for FW to be ready after EEH reset\n");
4332 status = be_fw_wait_ready(adapter); 4342 status = be_fw_wait_ready(adapter);
4333 if (status) 4343 if (status)
4334 return PCI_ERS_RESULT_DISCONNECT; 4344 return PCI_ERS_RESULT_DISCONNECT;
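
With the POST stage now read from the CSR BAR on BE2/BE3, the driver maps that BAR only on the chips and functions that need it and unmaps it conditionally on teardown. A kernel-style sketch of mapping an optional BAR this way (the BAR index and field names are illustrative, not a definitive layout):

#include <linux/errno.h>
#include <linux/pci.h>

struct my_adapter {
	struct pci_dev *pdev;
	u8 __iomem *csr;	/* NULL when the BAR is not needed/mapped */
};

static int my_map_optional_csr(struct my_adapter *adapter, bool need_csr)
{
	if (!need_csr)
		return 0;

	adapter->csr = pci_iomap(adapter->pdev, 2, 0);	/* BAR 2, full length */
	return adapter->csr ? 0 : -ENOMEM;
}

static void my_unmap_csr(struct my_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
}
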
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 8f5832c606e1..e835e7b95f81 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -36,6 +36,7 @@
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/mdio.h> 38#include <linux/mdio.h>
39#include <linux/pm_runtime.h>
39 40
40#include "e1000.h" 41#include "e1000.h"
41 42
@@ -2236,7 +2237,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
2236 return 0; 2237 return 0;
2237} 2238}
2238 2239
2240static int e1000e_ethtool_begin(struct net_device *netdev)
2241{
2242 return pm_runtime_get_sync(netdev->dev.parent);
2243}
2244
2245static void e1000e_ethtool_complete(struct net_device *netdev)
2246{
2247 pm_runtime_put_sync(netdev->dev.parent);
2248}
2249
2239static const struct ethtool_ops e1000_ethtool_ops = { 2250static const struct ethtool_ops e1000_ethtool_ops = {
2251 .begin = e1000e_ethtool_begin,
2252 .complete = e1000e_ethtool_complete,
2240 .get_settings = e1000_get_settings, 2253 .get_settings = e1000_get_settings,
2241 .set_settings = e1000_set_settings, 2254 .set_settings = e1000_set_settings,
2242 .get_drvinfo = e1000_get_drvinfo, 2255 .get_drvinfo = e1000_get_drvinfo,
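
The .begin/.complete hooks added above take a runtime-PM reference for the duration of any ethtool operation, so the device cannot be runtime-suspended while its registers are being touched. The generic shape of that pairing, as a hedged sketch:

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

/* Called by the ethtool core before any other ethtool_ops callback. */
static int my_ethtool_begin(struct net_device *netdev)
{
	return pm_runtime_get_sync(netdev->dev.parent);
}

/* Called after the operation completes, balancing the reference above. */
static void my_ethtool_complete(struct net_device *netdev)
{
	pm_runtime_put_sync(netdev->dev.parent);
}
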
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 382813dfc7a8..1cdec5fd2129 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -782,6 +782,59 @@ release:
782} 782}
783 783
784/** 784/**
785 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
786 * @hw: pointer to the HW structure
787 * @link: link up bool flag
788 *
789 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
790 * preventing further DMA write requests. Workaround the issue by disabling
791 * the de-assertion of the clock request when in 1Gbps mode.
792 **/
793static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
794{
795 u32 fextnvm6 = er32(FEXTNVM6);
796 s32 ret_val = 0;
797
798 if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
799 u16 kmrn_reg;
800
801 ret_val = hw->phy.ops.acquire(hw);
802 if (ret_val)
803 return ret_val;
804
805 ret_val =
806 e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
807 &kmrn_reg);
808 if (ret_val)
809 goto release;
810
811 ret_val =
812 e1000e_write_kmrn_reg_locked(hw,
813 E1000_KMRNCTRLSTA_K1_CONFIG,
814 kmrn_reg &
815 ~E1000_KMRNCTRLSTA_K1_ENABLE);
816 if (ret_val)
817 goto release;
818
819 usleep_range(10, 20);
820
821 ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
822
823 ret_val =
824 e1000e_write_kmrn_reg_locked(hw,
825 E1000_KMRNCTRLSTA_K1_CONFIG,
826 kmrn_reg);
827release:
828 hw->phy.ops.release(hw);
829 } else {
830 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
831 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
832 }
833
834 return ret_val;
835}
836
837/**
785 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 838 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
786 * @hw: pointer to the HW structure 839 * @hw: pointer to the HW structure
787 * 840 *
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
818 return ret_val; 871 return ret_val;
819 } 872 }
820 873
874 /* Work-around I218 hang issue */
875 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
876 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
877 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
878 if (ret_val)
879 return ret_val;
880 }
881
821 /* Clear link partner's EEE ability */ 882 /* Clear link partner's EEE ability */
822 hw->dev_spec.ich8lan.eee_lp_ability = 0; 883 hw->dev_spec.ich8lan.eee_lp_ability = 0;
823 884
@@ -3953,8 +4014,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3953 4014
3954 phy_ctrl = er32(PHY_CTRL); 4015 phy_ctrl = er32(PHY_CTRL);
3955 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4016 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4017
3956 if (hw->phy.type == e1000_phy_i217) { 4018 if (hw->phy.type == e1000_phy_i217) {
3957 u16 phy_reg; 4019 u16 phy_reg, device_id = hw->adapter->pdev->device;
4020
4021 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
4022 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
4023 u32 fextnvm6 = er32(FEXTNVM6);
4024
4025 ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
4026 }
3958 4027
3959 ret_val = hw->phy.ops.acquire(hw); 4028 ret_val = hw->phy.ops.acquire(hw);
3960 if (ret_val) 4029 if (ret_val)
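
The K1 work-around above boils down to a read-modify-write of one flag: REQ_PLL_CLK (bit 8) in FEXTNVM6 is set while the link is up at 1 Gb/s and cleared again at other speeds or on link down. A self-contained illustration of that bit handling, with the register value simulated rather than read from hardware:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define REQ_PLL_CLK 0x00000100u		/* same bit as E1000_FEXTNVM6_REQ_PLL_CLK */

static uint32_t update_fextnvm6(uint32_t reg, bool link_up_at_1g)
{
	if (link_up_at_1g)
		return reg | REQ_PLL_CLK;	/* keep the PLL clock requested */
	return reg & ~REQ_PLL_CLK;		/* allow the clock request to de-assert */
}

int main(void)
{
	uint32_t reg = 0;

	reg = update_fextnvm6(reg, true);
	printf("1G link:   0x%08x\n", reg);
	reg = update_fextnvm6(reg, false);
	printf("link down: 0x%08x\n", reg);
	return 0;
}
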
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index b6d3174d7d2d..8bf4655c2e17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -92,6 +92,8 @@
92#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 92#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 93#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
94 94
95#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
96
95#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 97#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
96 98
97#define E1000_ICH_RAR_ENTRIES 7 99#define E1000_ICH_RAR_ENTRIES 7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index b4eab18e1c16..142ca39a68f6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4284,6 +4284,7 @@ static int e1000_open(struct net_device *netdev)
4284 netif_start_queue(netdev); 4284 netif_start_queue(netdev);
4285 4285
4286 adapter->idle_check = true; 4286 adapter->idle_check = true;
4287 hw->mac.get_link_status = true;
4287 pm_runtime_put(&pdev->dev); 4288 pm_runtime_put(&pdev->dev);
4288 4289
4289 /* fire a link status change interrupt to start the watchdog */ 4290 /* fire a link status change interrupt to start the watchdog */
@@ -4642,6 +4643,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4642 (adapter->hw.phy.media_type == e1000_media_type_copper)) { 4643 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4643 int ret_val; 4644 int ret_val;
4644 4645
4646 pm_runtime_get_sync(&adapter->pdev->dev);
4645 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); 4647 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4646 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); 4648 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4647 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); 4649 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4652,6 +4654,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4652 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); 4654 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4653 if (ret_val) 4655 if (ret_val)
4654 e_warn("Error reading PHY register\n"); 4656 e_warn("Error reading PHY register\n");
4657 pm_runtime_put_sync(&adapter->pdev->dev);
4655 } else { 4658 } else {
4656 /* Do not read PHY registers if link is not up 4659 /* Do not read PHY registers if link is not up
4657 * Set values to typical power-on defaults 4660 * Set values to typical power-on defaults
@@ -5865,8 +5868,7 @@ release:
5865 return retval; 5868 return retval;
5866} 5869}
5867 5870
5868static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake, 5871static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5869 bool runtime)
5870{ 5872{
5871 struct net_device *netdev = pci_get_drvdata(pdev); 5873 struct net_device *netdev = pci_get_drvdata(pdev);
5872 struct e1000_adapter *adapter = netdev_priv(netdev); 5874 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5890,10 +5892,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5890 } 5892 }
5891 e1000e_reset_interrupt_capability(adapter); 5893 e1000e_reset_interrupt_capability(adapter);
5892 5894
5893 retval = pci_save_state(pdev);
5894 if (retval)
5895 return retval;
5896
5897 status = er32(STATUS); 5895 status = er32(STATUS);
5898 if (status & E1000_STATUS_LU) 5896 if (status & E1000_STATUS_LU)
5899 wufc &= ~E1000_WUFC_LNKC; 5897 wufc &= ~E1000_WUFC_LNKC;
@@ -5945,13 +5943,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5945 ew32(WUFC, 0); 5943 ew32(WUFC, 0);
5946 } 5944 }
5947 5945
5948 *enable_wake = !!wufc;
5949
5950 /* make sure adapter isn't asleep if manageability is enabled */
5951 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5952 (hw->mac.ops.check_mng_mode(hw)))
5953 *enable_wake = true;
5954
5955 if (adapter->hw.phy.type == e1000_phy_igp_3) 5946 if (adapter->hw.phy.type == e1000_phy_igp_3)
5956 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5947 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5957 5948
@@ -5960,27 +5951,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5960 */ 5951 */
5961 e1000e_release_hw_control(adapter); 5952 e1000e_release_hw_control(adapter);
5962 5953
5963 pci_disable_device(pdev);
5964
5965 return 0;
5966}
5967
5968static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5969{
5970 if (sleep && wake) {
5971 pci_prepare_to_sleep(pdev);
5972 return;
5973 }
5974
5975 pci_wake_from_d3(pdev, wake);
5976 pci_set_power_state(pdev, PCI_D3hot);
5977}
5978
5979static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, bool wake)
5980{
5981 struct net_device *netdev = pci_get_drvdata(pdev);
5982 struct e1000_adapter *adapter = netdev_priv(netdev);
5983
5984 /* The pci-e switch on some quad port adapters will report a 5954 /* The pci-e switch on some quad port adapters will report a
5985 * correctable error when the MAC transitions from D0 to D3. To 5955 * correctable error when the MAC transitions from D0 to D3. To
5986 * prevent this we need to mask off the correctable errors on the 5956 * prevent this we need to mask off the correctable errors on the
@@ -5994,12 +5964,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, bool wake)
5994 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, 5964 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
5995 (devctl & ~PCI_EXP_DEVCTL_CERE)); 5965 (devctl & ~PCI_EXP_DEVCTL_CERE));
5996 5966
5997 e1000_power_off(pdev, sleep, wake); 5967 pci_save_state(pdev);
5968 pci_prepare_to_sleep(pdev);
5998 5969
5999 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); 5970 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6000 } else {
6001 e1000_power_off(pdev, sleep, wake);
6002 } 5971 }
5972
5973 return 0;
6003} 5974}
6004 5975
6005#ifdef CONFIG_PCIEASPM 5976#ifdef CONFIG_PCIEASPM
@@ -6057,9 +6028,7 @@ static int __e1000_resume(struct pci_dev *pdev)
6057 if (aspm_disable_flag) 6028 if (aspm_disable_flag)
6058 e1000e_disable_aspm(pdev, aspm_disable_flag); 6029 e1000e_disable_aspm(pdev, aspm_disable_flag);
6059 6030
6060 pci_set_power_state(pdev, PCI_D0); 6031 pci_set_master(pdev);
6061 pci_restore_state(pdev);
6062 pci_save_state(pdev);
6063 6032
6064 e1000e_set_interrupt_capability(adapter); 6033 e1000e_set_interrupt_capability(adapter);
6065 if (netif_running(netdev)) { 6034 if (netif_running(netdev)) {
@@ -6125,14 +6094,8 @@ static int __e1000_resume(struct pci_dev *pdev)
6125static int e1000_suspend(struct device *dev) 6094static int e1000_suspend(struct device *dev)
6126{ 6095{
6127 struct pci_dev *pdev = to_pci_dev(dev); 6096 struct pci_dev *pdev = to_pci_dev(dev);
6128 int retval;
6129 bool wake;
6130
6131 retval = __e1000_shutdown(pdev, &wake, false);
6132 if (!retval)
6133 e1000_complete_shutdown(pdev, true, wake);
6134 6097
6135 return retval; 6098 return __e1000_shutdown(pdev, false);
6136} 6099}
6137 6100
6138static int e1000_resume(struct device *dev) 6101static int e1000_resume(struct device *dev)
@@ -6155,13 +6118,10 @@ static int e1000_runtime_suspend(struct device *dev)
6155 struct net_device *netdev = pci_get_drvdata(pdev); 6118 struct net_device *netdev = pci_get_drvdata(pdev);
6156 struct e1000_adapter *adapter = netdev_priv(netdev); 6119 struct e1000_adapter *adapter = netdev_priv(netdev);
6157 6120
6158 if (e1000e_pm_ready(adapter)) { 6121 if (!e1000e_pm_ready(adapter))
6159 bool wake; 6122 return 0;
6160
6161 __e1000_shutdown(pdev, &wake, true);
6162 }
6163 6123
6164 return 0; 6124 return __e1000_shutdown(pdev, true);
6165} 6125}
6166 6126
6167static int e1000_idle(struct device *dev) 6127static int e1000_idle(struct device *dev)
@@ -6199,12 +6159,7 @@ static int e1000_runtime_resume(struct device *dev)
6199 6159
6200static void e1000_shutdown(struct pci_dev *pdev) 6160static void e1000_shutdown(struct pci_dev *pdev)
6201{ 6161{
6202 bool wake = false; 6162 __e1000_shutdown(pdev, false);
6203
6204 __e1000_shutdown(pdev, &wake, false);
6205
6206 if (system_state == SYSTEM_POWER_OFF)
6207 e1000_complete_shutdown(pdev, false, wake);
6208} 6163}
6209 6164
6210#ifdef CONFIG_NET_POLL_CONTROLLER 6165#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6325,9 +6280,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6325 "Cannot re-enable PCI device after reset.\n"); 6280 "Cannot re-enable PCI device after reset.\n");
6326 result = PCI_ERS_RESULT_DISCONNECT; 6281 result = PCI_ERS_RESULT_DISCONNECT;
6327 } else { 6282 } else {
6328 pci_set_master(pdev);
6329 pdev->state_saved = true; 6283 pdev->state_saved = true;
6330 pci_restore_state(pdev); 6284 pci_restore_state(pdev);
6285 pci_set_master(pdev);
6331 6286
6332 pci_enable_wake(pdev, PCI_D3hot, 0); 6287 pci_enable_wake(pdev, PCI_D3hot, 0);
6333 pci_enable_wake(pdev, PCI_D3cold, 0); 6288 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6757,7 +6712,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6757 6712
6758 /* initialize the wol settings based on the eeprom settings */ 6713 /* initialize the wol settings based on the eeprom settings */
6759 adapter->wol = adapter->eeprom_wol; 6714 adapter->wol = adapter->eeprom_wol;
6760 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 6715
6716 /* make sure adapter isn't asleep if manageability is enabled */
6717 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6718 (hw->mac.ops.check_mng_mode(hw)))
6719 device_wakeup_enable(&pdev->dev);
6761 6720
6762 /* save off EEPROM version number */ 6721 /* save off EEPROM version number */
6763 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); 6722 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 794fe1497666..a7e6a3e37257 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -42,6 +42,7 @@
42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ 42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ 43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ 44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
45#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
45#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ 46#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
46#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 47#define E1000_FCT 0x00030 /* Flow Control Type - RW */
47#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 48#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 84e7e0909def..b64542acfa34 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1361 switch (hw->phy.type) { 1361 switch (hw->phy.type) {
1362 case e1000_phy_i210: 1362 case e1000_phy_i210:
1363 case e1000_phy_m88: 1363 case e1000_phy_m88:
1364 if (hw->phy.id == I347AT4_E_PHY_ID || 1364 switch (hw->phy.id) {
1365 hw->phy.id == M88E1112_E_PHY_ID) 1365 case I347AT4_E_PHY_ID:
1366 case M88E1112_E_PHY_ID:
1367 case I210_I_PHY_ID:
1366 ret_val = igb_copper_link_setup_m88_gen2(hw); 1368 ret_val = igb_copper_link_setup_m88_gen2(hw);
1367 else 1369 break;
1370 default:
1368 ret_val = igb_copper_link_setup_m88(hw); 1371 ret_val = igb_copper_link_setup_m88(hw);
1372 break;
1373 }
1369 break; 1374 break;
1370 case e1000_phy_igp_3: 1375 case e1000_phy_igp_3:
1371 ret_val = igb_copper_link_setup_igp(hw); 1376 ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d27edbc63923..25151401c2ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,7 +447,7 @@ struct igb_adapter {
447#endif 447#endif
448 struct i2c_algo_bit_data i2c_algo; 448 struct i2c_algo_bit_data i2c_algo;
449 struct i2c_adapter i2c_adap; 449 struct i2c_adapter i2c_adap;
450 struct igb_i2c_client_list *i2c_clients; 450 struct i2c_client *i2c_client;
451}; 451};
452 452
453#define IGB_FLAG_HAS_MSI (1 << 0) 453#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0a9b073d0b03..4623502054d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,6 +39,10 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40 40
41#ifdef CONFIG_IGB_HWMON 41#ifdef CONFIG_IGB_HWMON
42struct i2c_board_info i350_sensor_info = {
43 I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
44};
45
42/* hwmon callback functions */ 46/* hwmon callback functions */
43static ssize_t igb_hwmon_show_location(struct device *dev, 47static ssize_t igb_hwmon_show_location(struct device *dev,
44 struct device_attribute *attr, 48 struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
188 unsigned int i; 192 unsigned int i;
189 int n_attrs; 193 int n_attrs;
190 int rc = 0; 194 int rc = 0;
195 struct i2c_client *client = NULL;
191 196
192 /* If this method isn't defined we don't support thermals */ 197 /* If this method isn't defined we don't support thermals */
193 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) 198 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
198 if (rc) 203 if (rc)
199 goto exit; 204 goto exit;
200 205
206 /* init i2c_client */
207 client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
208 if (client == NULL) {
209 dev_info(&adapter->pdev->dev,
210 "Failed to create new i2c device..\n");
211 goto exit;
212 }
213 adapter->i2c_client = client;
214
201 /* Allocation space for max attributes 215 /* Allocation space for max attributes
202 * max num sensors * values (loc, temp, max, caution) 216 * max num sensors * values (loc, temp, max, caution)
203 */ 217 */
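
The hwmon change above registers the on-board i350 temperature sensor once at init time; note that I2C_BOARD_INFO() expects the 7-bit client address, so the 8-bit bus value 0xf8 is shifted right by one (giving 0x7c) before registration. A kernel-style sketch of creating such a client (the adapter pointer is assumed to be already set up; names are placeholders):

#include <linux/i2c.h>

static const struct i2c_board_info my_sensor_info = {
	/* 0xf8 is the 8-bit address; the I2C core wants 7-bit, hence >> 1. */
	I2C_BOARD_INFO("i350bb", 0xf8 >> 1),
};

static struct i2c_client *my_register_sensor(struct i2c_adapter *adap)
{
	/* Returns NULL on failure in this kernel generation. */
	return i2c_new_device(adap, &my_sensor_info);
}
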
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed79a1c53b59..4dbd62968c7a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1923 return; 1923 return;
1924} 1924}
1925 1925
1926static const struct i2c_board_info i350_sensor_info = {
1927 I2C_BOARD_INFO("i350bb", 0Xf8),
1928};
1929
1930/* igb_init_i2c - Init I2C interface 1926/* igb_init_i2c - Init I2C interface
1931 * @adapter: pointer to adapter structure 1927 * @adapter: pointer to adapter structure
1932 * 1928 *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
6227 /* If we spanned a buffer we have a huge mess so test for it */ 6223 /* If we spanned a buffer we have a huge mess so test for it */
6228 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP))); 6224 BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
6229 6225
6230 /* Guarantee this function can be used by verifying buffer sizes */
6231 BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
6232 NET_IP_ALIGN +
6233 IGB_TS_HDR_LEN +
6234 ETH_FRAME_LEN +
6235 ETH_FCS_LEN));
6236
6237 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; 6226 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6238 page = rx_buffer->page; 6227 page = rx_buffer->page;
6239 prefetchw(page); 6228 prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7724 } 7713 }
7725} 7714}
7726 7715
7727static DEFINE_SPINLOCK(i2c_clients_lock);
7728
7729/* igb_get_i2c_client - returns matching client
7730 * in adapters's client list.
7731 * @adapter: adapter struct
7732 * @dev_addr: device address of i2c needed.
7733 */
7734static struct i2c_client *
7735igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
7736{
7737 ulong flags;
7738 struct igb_i2c_client_list *client_list;
7739 struct i2c_client *client = NULL;
7740 struct i2c_board_info client_info = {
7741 I2C_BOARD_INFO("igb", 0x00),
7742 };
7743
7744 spin_lock_irqsave(&i2c_clients_lock, flags);
7745 client_list = adapter->i2c_clients;
7746
7747 /* See if we already have an i2c_client */
7748 while (client_list) {
7749 if (client_list->client->addr == (dev_addr >> 1)) {
7750 client = client_list->client;
7751 goto exit;
7752 } else {
7753 client_list = client_list->next;
7754 }
7755 }
7756
7757 /* no client_list found, create a new one */
7758 client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
7759 if (client_list == NULL)
7760 goto exit;
7761
7762 /* dev_addr passed to us is left-shifted by 1 bit
7763 * i2c_new_device call expects it to be flush to the right.
7764 */
7765 client_info.addr = dev_addr >> 1;
7766 client_info.platform_data = adapter;
7767 client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
7768 if (client_list->client == NULL) {
7769 dev_info(&adapter->pdev->dev,
7770 "Failed to create new i2c device..\n");
7771 goto err_no_client;
7772 }
7773
7774 /* insert new client at head of list */
7775 client_list->next = adapter->i2c_clients;
7776 adapter->i2c_clients = client_list;
7777
7778 client = client_list->client;
7779 goto exit;
7780
7781err_no_client:
7782 kfree(client_list);
7783exit:
7784 spin_unlock_irqrestore(&i2c_clients_lock, flags);
7785 return client;
7786}
7787
7788/* igb_read_i2c_byte - Reads 8 bit word over I2C 7716/* igb_read_i2c_byte - Reads 8 bit word over I2C
7789 * @hw: pointer to hardware structure 7717 * @hw: pointer to hardware structure
7790 * @byte_offset: byte offset to read 7718 * @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7798 u8 dev_addr, u8 *data) 7726 u8 dev_addr, u8 *data)
7799{ 7727{
7800 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7728 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7801 struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); 7729 struct i2c_client *this_client = adapter->i2c_client;
7802 s32 status; 7730 s32 status;
7803 u16 swfw_mask = 0; 7731 u16 swfw_mask = 0;
7804 7732
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7835 u8 dev_addr, u8 data) 7763 u8 dev_addr, u8 data)
7836{ 7764{
7837 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7765 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7838 struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr); 7766 struct i2c_client *this_client = adapter->i2c_client;
7839 s32 status; 7767 s32 status;
7840 u16 swfw_mask = E1000_SWFW_PHY0_SM; 7768 u16 swfw_mask = E1000_SWFW_PHY0_SM;
7841 7769
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 29140502b71a..6562c736a1d8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1081 1081
1082 1082
1083/* mii management interface *************************************************/ 1083/* mii management interface *************************************************/
1084static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
1085{
1086 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1087 u32 autoneg_disable = FORCE_LINK_PASS |
1088 DISABLE_AUTO_NEG_SPEED_GMII |
1089 DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
1090 DISABLE_AUTO_NEG_FOR_DUPLEX;
1091
1092 if (mp->phy->autoneg == AUTONEG_ENABLE) {
1093 /* enable auto negotiation */
1094 pscr &= ~autoneg_disable;
1095 goto out_write;
1096 }
1097
1098 pscr |= autoneg_disable;
1099
1100 if (mp->phy->speed == SPEED_1000) {
1101 /* force gigabit, half duplex not supported */
1102 pscr |= SET_GMII_SPEED_TO_1000;
1103 pscr |= SET_FULL_DUPLEX_MODE;
1104 goto out_write;
1105 }
1106
1107 pscr &= ~SET_GMII_SPEED_TO_1000;
1108
1109 if (mp->phy->speed == SPEED_100)
1110 pscr |= SET_MII_SPEED_TO_100;
1111 else
1112 pscr &= ~SET_MII_SPEED_TO_100;
1113
1114 if (mp->phy->duplex == DUPLEX_FULL)
1115 pscr |= SET_FULL_DUPLEX_MODE;
1116 else
1117 pscr &= ~SET_FULL_DUPLEX_MODE;
1118
1119out_write:
1120 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1121}
1122
1084static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id) 1123static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
1085{ 1124{
1086 struct mv643xx_eth_shared_private *msp = dev_id; 1125 struct mv643xx_eth_shared_private *msp = dev_id;
@@ -1499,6 +1538,7 @@ static int
1499mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1538mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1500{ 1539{
1501 struct mv643xx_eth_private *mp = netdev_priv(dev); 1540 struct mv643xx_eth_private *mp = netdev_priv(dev);
1541 int ret;
1502 1542
1503 if (mp->phy == NULL) 1543 if (mp->phy == NULL)
1504 return -EINVAL; 1544 return -EINVAL;
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1508 */ 1548 */
1509 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1549 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1510 1550
1511 return phy_ethtool_sset(mp->phy, cmd); 1551 ret = phy_ethtool_sset(mp->phy, cmd);
1552 if (!ret)
1553 mv643xx_adjust_pscr(mp);
1554 return ret;
1512} 1555}
1513 1556
1514static void mv643xx_eth_get_drvinfo(struct net_device *dev, 1557static void mv643xx_eth_get_drvinfo(struct net_device *dev,
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev)
2442static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2485static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2443{ 2486{
2444 struct mv643xx_eth_private *mp = netdev_priv(dev); 2487 struct mv643xx_eth_private *mp = netdev_priv(dev);
2488 int ret;
2445 2489
2446 if (mp->phy != NULL) 2490 if (mp->phy == NULL)
2447 return phy_mii_ioctl(mp->phy, ifr, cmd); 2491 return -ENOTSUPP;
2448 2492
2449 return -EOPNOTSUPP; 2493 ret = phy_mii_ioctl(mp->phy, ifr, cmd);
2494 if (!ret)
2495 mv643xx_adjust_pscr(mp);
2496 return ret;
2450} 2497}
2451 2498
2452static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) 2499static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
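
mv643xx_adjust_pscr() above rebuilds the port serial control register from the PHY's settings after a successful phy_ethtool_sset() or phy_mii_ioctl(): when autonegotiation is off it forces the link, speed and duplex bits explicitly. A self-contained sketch of deriving such a control word (the bit values here are invented for illustration, not the real PSCR encoding):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative bit assignments only. */
#define FORCE_LINK	0x01
#define SPEED_1000_BIT	0x02
#define SPEED_100_BIT	0x04
#define FULL_DUPLEX_BIT	0x08

static uint32_t build_pscr(bool autoneg, int speed, bool full_duplex)
{
	uint32_t pscr = 0;

	if (autoneg)
		return pscr;		/* let the MAC follow autonegotiation */

	pscr |= FORCE_LINK;
	if (speed == 1000)
		pscr |= SPEED_1000_BIT | FULL_DUPLEX_BIT;	/* no half duplex at 1G */
	else if (speed == 100)
		pscr |= SPEED_100_BIT | (full_duplex ? FULL_DUPLEX_BIT : 0);
	else
		pscr |= full_duplex ? FULL_DUPLEX_BIT : 0;

	return pscr;
}

int main(void)
{
	printf("forced 100/full: 0x%02x\n", build_pscr(false, 100, true));
	printf("autoneg:         0x%02x\n", build_pscr(true, 0, false));
	return 0;
}
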
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 7e64033d7de3..0706623cfb96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
226 226
227static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) 227static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
228{ 228{
229 u64 in_param; 229 u64 in_param = 0;
230 int err; 230 int err;
231 231
232 if (mlx4_is_mfunc(dev)) { 232 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4c37d487bb03..47b996096559 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
565 struct mlx4_en_dev *mdev = priv->mdev; 565 struct mlx4_en_dev *mdev = priv->mdev;
566 struct mlx4_dev *dev = mdev->dev; 566 struct mlx4_dev *dev = mdev->dev;
567 int qpn = priv->base_qpn; 567 int qpn = priv->base_qpn;
568 u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 568 u64 mac;
569
570 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
571 priv->dev->dev_addr);
572 mlx4_unregister_mac(dev, priv->port, mac);
573 569
574 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 570 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
571 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
572 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
573 priv->dev->dev_addr);
574 mlx4_unregister_mac(dev, priv->port, mac);
575 } else {
575 struct mlx4_mac_entry *entry; 576 struct mlx4_mac_entry *entry;
576 struct hlist_node *tmp; 577 struct hlist_node *tmp;
577 struct hlist_head *bucket; 578 struct hlist_head *bucket;
578 unsigned int mac_hash; 579 unsigned int i;
579 580
580 mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX]; 581 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
581 bucket = &priv->mac_hash[mac_hash]; 582 bucket = &priv->mac_hash[i];
582 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 583 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
583 if (ether_addr_equal_64bits(entry->mac, 584 mac = mlx4_en_mac_to_u64(entry->mac);
584 priv->dev->dev_addr)) { 585 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
585 en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n", 586 entry->mac);
586 priv->port, priv->dev->dev_addr, qpn);
587 mlx4_en_uc_steer_release(priv, entry->mac, 587 mlx4_en_uc_steer_release(priv, entry->mac,
588 qpn, entry->reg_id); 588 qpn, entry->reg_id);
589 mlx4_qp_release_range(dev, qpn, 1);
590 589
590 mlx4_unregister_mac(dev, priv->port, mac);
591 hlist_del_rcu(&entry->hlist); 591 hlist_del_rcu(&entry->hlist);
592 kfree_rcu(entry, rcu); 592 kfree_rcu(entry, rcu);
593 break;
594 } 593 }
595 } 594 }
595
596 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
597 priv->port, qpn);
598 mlx4_qp_release_range(dev, qpn, 1);
599 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
596 } 600 }
597} 601}
598 602
@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr)
650 return mac; 654 return mac;
651} 655}
652 656
653static int mlx4_en_set_mac(struct net_device *dev, void *addr) 657static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
654{
655 struct mlx4_en_priv *priv = netdev_priv(dev);
656 struct mlx4_en_dev *mdev = priv->mdev;
657 struct sockaddr *saddr = addr;
658
659 if (!is_valid_ether_addr(saddr->sa_data))
660 return -EADDRNOTAVAIL;
661
662 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
663 queue_work(mdev->workqueue, &priv->mac_task);
664 return 0;
665}
666
667static void mlx4_en_do_set_mac(struct work_struct *work)
668{ 658{
669 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
670 mac_task);
671 struct mlx4_en_dev *mdev = priv->mdev;
672 int err = 0; 659 int err = 0;
673 660
674 mutex_lock(&mdev->state_lock);
675 if (priv->port_up) { 661 if (priv->port_up) {
676 /* Remove old MAC and insert the new one */ 662 /* Remove old MAC and insert the new one */
677 err = mlx4_en_replace_mac(priv, priv->base_qpn, 663 err = mlx4_en_replace_mac(priv, priv->base_qpn,
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
683 } else 669 } else
684 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 670 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
685 671
672 return err;
673}
674
675static int mlx4_en_set_mac(struct net_device *dev, void *addr)
676{
677 struct mlx4_en_priv *priv = netdev_priv(dev);
678 struct mlx4_en_dev *mdev = priv->mdev;
679 struct sockaddr *saddr = addr;
680 int err;
681
682 if (!is_valid_ether_addr(saddr->sa_data))
683 return -EADDRNOTAVAIL;
684
685 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
686
687 mutex_lock(&mdev->state_lock);
688 err = mlx4_en_do_set_mac(priv);
686 mutex_unlock(&mdev->state_lock); 689 mutex_unlock(&mdev->state_lock);
690
691 return err;
687} 692}
688 693
689static void mlx4_en_clear_list(struct net_device *dev) 694static void mlx4_en_clear_list(struct net_device *dev)
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
1348 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1353 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1349 } 1354 }
1350 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1355 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1351 queue_work(mdev->workqueue, &priv->mac_task); 1356 mlx4_en_do_set_mac(priv);
1352 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1357 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1353 } 1358 }
1354 mutex_unlock(&mdev->state_lock); 1359 mutex_unlock(&mdev->state_lock);
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1828 } 1833 }
1829 1834
1830#ifdef CONFIG_RFS_ACCEL 1835#ifdef CONFIG_RFS_ACCEL
1831 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); 1836 if (priv->mdev->dev->caps.comp_pool) {
1832 if (!priv->dev->rx_cpu_rmap) 1837 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
1833 goto err; 1838 if (!priv->dev->rx_cpu_rmap)
1839 goto err;
1840 }
1834#endif 1841#endif
1835 1842
1836 return 0; 1843 return 0;
@@ -2002,7 +2009,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2002 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2009 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2003 spin_lock_init(&priv->stats_lock); 2010 spin_lock_init(&priv->stats_lock);
2004 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2011 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2005 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
2006 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2012 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2007 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2013 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2008 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2014 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
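
The en_netdev.c changes drop the mac_task work item: .ndo_set_mac_address now validates and copies the address, then programs it synchronously under mdev->state_lock via mlx4_en_do_set_mac(), and the stats worker reuses the same helper when it finds the MAC was removed. The caller therefore gets a real error code back instead of 0 from a fire-and-forget work item. A compressed user-space sketch of that conversion (a pthread mutex stands in for the state lock; all names are invented):

    /* build with: cc -pthread sketch.c */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct nic {
        pthread_mutex_t state_lock;
        int port_up;
        unsigned char mac[6];
    };

    /* Core of the operation; caller must hold state_lock (mirrors
     * mlx4_en_do_set_mac() being called with mdev->state_lock held). */
    static int nic_do_set_mac(struct nic *n)
    {
        if (!n->port_up) {
            printf("port down, MAC will be applied on next open\n");
            return 0;
        }
        printf("programming MAC %02x:...:%02x into hardware\n",
               n->mac[0], n->mac[5]);
        return 0;
    }

    /* ndo_set_mac_address-style entry point: validate, copy, then do the
     * work directly under the lock instead of queueing a work item. */
    static int nic_set_mac(struct nic *n, const unsigned char *addr)
    {
        int err;

        if (addr[0] & 1)              /* reject multicast addresses */
            return -EADDRNOTAVAIL;

        memcpy(n->mac, addr, 6);

        pthread_mutex_lock(&n->state_lock);
        err = nic_do_set_mac(n);
        pthread_mutex_unlock(&n->state_lock);
        return err;
    }

    int main(void)
    {
        struct nic n = { PTHREAD_MUTEX_INITIALIZER, 1, {0} };
        unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        return nic_set_mac(&n, mac);
    }
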
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 50917eb3013e..f6245579962d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
789 789
790 /* turn off device-managed steering capability if not enabled */
791 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
792 MLX4_GET(field, outbox->buf,
793 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
794 field &= 0x7f;
795 MLX4_PUT(outbox->buf, field,
796 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
797 }
790 return 0; 798 return 0;
791} 799}
792 800
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d180bc46826a..16abde20e1fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1555 1555
1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1557{ 1557{
1558 u64 in_param; 1558 u64 in_param = 0;
1559 1559
1560 if (mlx4_is_mfunc(dev)) { 1560 if (mlx4_is_mfunc(dev)) {
1561 set_param_l(&in_param, idx); 1561 set_param_l(&in_param, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf883345af88..d738454116a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
1235 1235
1236static inline void set_param_l(u64 *arg, u32 val) 1236static inline void set_param_l(u64 *arg, u32 val)
1237{ 1237{
1238 *((u32 *)arg) = val; 1238 *arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
1239} 1239}
1240 1240
1241static inline void set_param_h(u64 *arg, u32 val) 1241static inline void set_param_h(u64 *arg, u32 val)
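
Most of the mlx4 hunks in this series only add `= 0` initializers for in_param/out_param; they pair with the new set_param_l() above, which now reads the existing value so it can preserve the upper 32 bits rather than writing through a u32 pointer (a form that leaves half of the u64 untouched and hits different bytes depending on endianness). A standalone sketch of the low/high setters composing one 64-bit mailbox parameter (set_param_h() here is written to match its obvious counterpart, not copied from the driver):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Set the low 32 bits, preserving the high ones. */
    static inline void set_param_l(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0xffffffff00000000ULL) | (uint64_t)val;
    }

    /* Set the high 32 bits, preserving the low ones. */
    static inline void set_param_h(uint64_t *arg, uint32_t val)
    {
        *arg = (*arg & 0xffffffffULL) | ((uint64_t)val << 32);
    }

    int main(void)
    {
        uint64_t in_param = 0;   /* must start defined: set_param_l() reads it */

        set_param_h(&in_param, 0x00000001);   /* e.g. a port number */
        set_param_l(&in_param, 0x0000abcd);   /* e.g. a resource index */

        printf("in_param = 0x%016" PRIx64 "\n", in_param);  /* 0x000000010000abcd */
        return 0;
    }

Since the new setter reads *arg before writing it, passing an uninitialized u64 would be a use of uninitialized memory, which is why the callers now zero their parameters up front.
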
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c313d7e943a9..f710b7ce0dcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -509,7 +509,6 @@ struct mlx4_en_priv {
509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
510 struct mlx4_qp drop_qp; 510 struct mlx4_qp drop_qp;
511 struct work_struct rx_mode_task; 511 struct work_struct rx_mode_task;
512 struct work_struct mac_task;
513 struct work_struct watchdog_task; 512 struct work_struct watchdog_task;
514 struct work_struct linkstate_task; 513 struct work_struct linkstate_task;
515 struct delayed_work stats_task; 514 struct delayed_work stats_task;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 602ca9bf78e4..f91719a08cba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
183 183
184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
185{ 185{
186 u64 in_param; 186 u64 in_param = 0;
187 u64 out_param; 187 u64 out_param;
188 int err; 188 int err;
189 189
@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
240 240
241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) 241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
242{ 242{
243 u64 in_param; 243 u64 in_param = 0;
244 int err; 244 int err;
245 245
246 if (mlx4_is_mfunc(dev)) { 246 if (mlx4_is_mfunc(dev)) {
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
351 351
352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) 352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
353{ 353{
354 u64 in_param; 354 u64 in_param = 0;
355 355
356 if (mlx4_is_mfunc(dev)) { 356 if (mlx4_is_mfunc(dev)) {
357 set_param_l(&in_param, index); 357 set_param_l(&in_param, index);
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
374 374
375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) 375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
376{ 376{
377 u64 param; 377 u64 param = 0;
378 378
379 if (mlx4_is_mfunc(dev)) { 379 if (mlx4_is_mfunc(dev)) {
380 set_param_l(&param, index); 380 set_param_l(&param, index);
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
395 395
396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) 396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
397{ 397{
398 u64 in_param; 398 u64 in_param = 0;
399 399
400 if (mlx4_is_mfunc(dev)) { 400 if (mlx4_is_mfunc(dev)) {
401 set_param_l(&in_param, index); 401 set_param_l(&in_param, index);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1ac88637ad9d..00f223acada7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
101 101
102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) 102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
103{ 103{
104 u64 in_param; 104 u64 in_param = 0;
105 int err; 105 int err;
106 106
107 if (mlx4_is_mfunc(dev)) { 107 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 719ead15e491..10c57c86388b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
175 175
176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
177{ 177{
178 u64 out_param; 178 u64 out_param = 0;
179 int err; 179 int err;
180 180
181 if (mlx4_is_mfunc(dev)) { 181 if (mlx4_is_mfunc(dev)) {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
222 222
223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
224{ 224{
225 u64 out_param; 225 u64 out_param = 0;
226 226
227 if (mlx4_is_mfunc(dev)) { 227 if (mlx4_is_mfunc(dev)) {
228 set_param_l(&out_param, port); 228 set_param_l(&out_param, port);
@@ -361,7 +361,7 @@ out:
361 361
362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
363{ 363{
364 u64 out_param; 364 u64 out_param = 0;
365 int err; 365 int err;
366 366
367 if (mlx4_is_mfunc(dev)) { 367 if (mlx4_is_mfunc(dev)) {
@@ -406,7 +406,7 @@ out:
406 406
407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
408{ 408{
409 u64 in_param; 409 u64 in_param = 0;
410 int err; 410 int err;
411 411
412 if (mlx4_is_mfunc(dev)) { 412 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 81e2abe07bbb..e891b058c1be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
222 222
223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) 223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
224{ 224{
225 u64 in_param; 225 u64 in_param = 0;
226 u64 out_param; 226 u64 out_param;
227 int err; 227 int err;
228 228
@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
255 255
256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) 256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
257{ 257{
258 u64 in_param; 258 u64 in_param = 0;
259 int err; 259 int err;
260 260
261 if (mlx4_is_mfunc(dev)) { 261 if (mlx4_is_mfunc(dev)) {
@@ -319,7 +319,7 @@ err_out:
319 319
320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) 320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
321{ 321{
322 u64 param; 322 u64 param = 0;
323 323
324 if (mlx4_is_mfunc(dev)) { 324 if (mlx4_is_mfunc(dev)) {
325 set_param_l(&param, qpn); 325 set_param_l(&param, qpn);
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
344 344
345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) 345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
346{ 346{
347 u64 in_param; 347 u64 in_param = 0;
348 348
349 if (mlx4_is_mfunc(dev)) { 349 if (mlx4_is_mfunc(dev)) {
350 set_param_l(&in_param, qpn); 350 set_param_l(&in_param, qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 083fb48dc3d7..2995687f1aee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2990 u8 steer_type_mask = 2; 2990 u8 steer_type_mask = 2;
2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2992 2992
2993 if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
2994 return -EINVAL;
2995
2993 qpn = vhcr->in_modifier & 0xffffff; 2996 qpn = vhcr->in_modifier & 0xffffff;
2994 err = get_res(dev, slave, qpn, RES_QP, &rqp); 2997 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2995 if (err) 2998 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index feda6c00829f..e329fe1f11b7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
149 149
150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) 150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
151{ 151{
152 u64 in_param; 152 u64 in_param = 0;
153 153
154 if (mlx4_is_mfunc(dev)) { 154 if (mlx4_is_mfunc(dev)) {
155 set_param_l(&in_param, srqn); 155 set_param_l(&in_param, srqn);
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index e5b19b056909..3c4d6274bb9b 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
202 return 0; 202 return 0;
203 203
204 out: 204 out:
205 if (rrpriv->evt_ring)
206 pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
207 rrpriv->evt_ring_dma);
205 if (rrpriv->rx_ring) 208 if (rrpriv->rx_ring)
206 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, 209 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
207 rrpriv->rx_ring_dma); 210 rrpriv->rx_ring_dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 417b2af1aa80..73abbc1655d5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev)
660 ether_setup(dev); 660 ether_setup(dev);
661 661
662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
663 dev->priv_flags |= IFF_UNICAST_FLT;
663 dev->netdev_ops = &macvlan_netdev_ops; 664 dev->netdev_ops = &macvlan_netdev_ops;
664 dev->destructor = free_netdev; 665 dev->destructor = free_netdev;
665 dev->header_ops = &macvlan_hard_header_ops, 666 dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6ba0883b9c36..621c1bddeee9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1151,6 +1151,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1151 netdev_upper_dev_unlink(port_dev, dev); 1151 netdev_upper_dev_unlink(port_dev, dev);
1152 team_port_disable_netpoll(port); 1152 team_port_disable_netpoll(port);
1153 vlan_vids_del_by_dev(port_dev, dev); 1153 vlan_vids_del_by_dev(port_dev, dev);
1154 dev_uc_unsync(port_dev, dev);
1155 dev_mc_unsync(port_dev, dev);
1154 dev_close(port_dev); 1156 dev_close(port_dev);
1155 team_port_leave(team, port); 1157 team_port_leave(team, port);
1156 1158
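
team_port_del() now removes the unicast and multicast addresses the team synced onto the departing port (dev_uc_unsync()/dev_mc_unsync()) before closing it, so the lower device's filter doesn't keep entries that only existed because of the now-removed master. A toy model of that sync/unsync bookkeeping, with plain string arrays standing in for netdev address lists:

    #include <stdio.h>
    #include <string.h>

    #define MAX_ADDRS 8

    struct port {                        /* stand-in for the lower netdev */
        char filter[MAX_ADDRS][18];      /* addresses programmed on the NIC */
        int n;
    };

    /* Stand-ins for dev_uc_sync()/dev_uc_unsync(): the master pushes its
     * extra addresses down to the port, and strips them again on release. */
    static void addr_sync(struct port *p, const char *addr)
    {
        if (p->n < MAX_ADDRS)
            strcpy(p->filter[p->n++], addr);
    }

    static void addr_unsync(struct port *p, const char *addr)
    {
        for (int i = 0; i < p->n; i++) {
            if (strcmp(p->filter[i], addr))
                continue;
            memmove(p->filter[i], p->filter[i + 1],
                    sizeof(p->filter[0]) * (size_t)(p->n - 1 - i));
            p->n--;
            break;
        }
    }

    static void port_close(struct port *p)
    {
        printf("closing port, %d stale address(es) left in its filter\n", p->n);
    }

    int main(void)
    {
        struct port p = { .n = 0 };
        const char *team_addrs[] = { "02:00:00:00:00:01", "01:00:5e:00:00:fb" };

        for (int i = 0; i < 2; i++)      /* while enslaved: keep in sync */
            addr_sync(&p, team_addrs[i]);

        /* teardown order from team_port_del(): unsync first, then close */
        for (int i = 0; i < 2; i++)
            addr_unsync(&p, team_addrs[i]);
        port_close(&p);
        return 0;
    }
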
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2c6a22e278ea..b7c457adc0dc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
747 goto drop; 747 goto drop;
748 skb_orphan(skb); 748 skb_orphan(skb);
749 749
750 nf_reset(skb);
751
750 /* Enqueue packet */ 752 /* Enqueue packet */
751 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); 753 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
752 754
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4aad350e4dae..eae7a03d4f9b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2958 2958
2959 adapter->num_rx_queues = num_rx_queues; 2959 adapter->num_rx_queues = num_rx_queues;
2960 adapter->num_tx_queues = num_tx_queues; 2960 adapter->num_tx_queues = num_tx_queues;
2961 adapter->rx_buf_per_pkt = 1;
2961 2962
2962 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2963 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2963 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2964 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index a0feb17a0238..63a124340cbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
472 VMXNET3_RX_RING_MAX_SIZE) 472 VMXNET3_RX_RING_MAX_SIZE)
473 return -EINVAL; 473 return -EINVAL;
474 474
475 /* if adapter not yet initialized, do nothing */
476 if (adapter->rx_buf_per_pkt == 0) {
477 netdev_err(netdev, "adapter not completely initialized, "
478 "ring size cannot be changed yet\n");
479 return -EOPNOTSUPP;
480 }
475 481
476 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ 482 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
477 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & 483 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
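
vmxnet3_set_ringparam() now bails out with -EOPNOTSUPP while adapter->rx_buf_per_pkt is still zero, and the probe hunk above seeds that field to 1, so ring resizing is only accepted once the adapter is far enough along to honor it. A minimal sketch of the same guard (structure and names are stand-ins):

    #include <errno.h>
    #include <stdio.h>

    struct adapter {
        unsigned int rx_buf_per_pkt;   /* 0 until initialization finishes */
        unsigned int rx_ring_size;
    };

    static int set_ring_size(struct adapter *a, unsigned int new_size)
    {
        if (a->rx_buf_per_pkt == 0) {
            fprintf(stderr, "adapter not completely initialized\n");
            return -EOPNOTSUPP;        /* match the driver's error code */
        }
        a->rx_ring_size = new_size;
        return 0;
    }

    int main(void)
    {
        struct adapter a = { 0, 256 };

        if (set_ring_size(&a, 512))    /* rejected: still initializing */
            puts("resize refused before init");

        a.rx_buf_per_pkt = 1;          /* what the probe path now guarantees */
        return set_ring_size(&a, 512); /* accepted */
    }
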
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3198384689d9..35418146fa17 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
70/* 70/*
71 * Version numbers 71 * Version numbers
72 */ 72 */
73#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" 73#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"
74 74
75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
76#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 76#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00
77 77
78#if defined(CONFIG_PCI_MSI) 78#if defined(CONFIG_PCI_MSI)
79 /* RSS only makes sense if MSI-X is supported. */ 79 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f3a135cb50a9..db0df07c18dc 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -974,6 +974,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
974 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 974 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
975 tunnel_ip_select_ident(skb, old_iph, &rt->dst); 975 tunnel_ip_select_ident(skb, old_iph, &rt->dst);
976 976
977 nf_reset(skb);
978
977 vxlan_set_owner(dev, skb); 979 vxlan_set_owner(dev, skb);
978 980
979 if (handle_offloads(skb)) 981 if (handle_offloads(skb))
@@ -1507,6 +1509,14 @@ static __net_init int vxlan_init_net(struct net *net)
1507static __net_exit void vxlan_exit_net(struct net *net) 1509static __net_exit void vxlan_exit_net(struct net *net)
1508{ 1510{
1509 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1511 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1512 struct vxlan_dev *vxlan;
1513 unsigned h;
1514
1515 rtnl_lock();
1516 for (h = 0; h < VNI_HASH_SIZE; ++h)
1517 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
1518 dev_close(vxlan->dev);
1519 rtnl_unlock();
1510 1520
1511 if (vn->sock) { 1521 if (vn->sock) {
1512 sk_release_kernel(vn->sock->sk); 1522 sk_release_kernel(vn->sock->sk);
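
On namespace teardown the vxlan driver now takes rtnl_lock, walks every VNI hash bucket, and dev_close()es each device before the shared receive socket is released. A standalone sketch of that loop shape, with a tiny hash table and a plain mutex standing in for the real structures (bucket count and values are made up):

    /* build with: cc -pthread sketch.c */
    #include <pthread.h>
    #include <stdio.h>

    #define VNI_HASH_SIZE 4            /* tiny bucket count, just for the sketch */

    struct toy_vxlan {
        unsigned int vni;
        struct toy_vxlan *next;        /* chain within one hash bucket */
    };

    static struct toy_vxlan *vni_list[VNI_HASH_SIZE];
    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* rtnl stand-in */

    static void toy_dev_close(struct toy_vxlan *v)
    {
        printf("closing vxlan device for VNI %u\n", v->vni);
    }

    /* Mirrors the shape of vxlan_exit_net(): take the configuration lock,
     * visit every bucket, shut each device down, then drop shared state. */
    static void toy_exit_net(void)
    {
        pthread_mutex_lock(&cfg_lock);
        for (unsigned int h = 0; h < VNI_HASH_SIZE; ++h)
            for (struct toy_vxlan *v = vni_list[h]; v; v = v->next)
                toy_dev_close(v);
        pthread_mutex_unlock(&cfg_lock);
        puts("releasing shared receive socket");
    }

    int main(void)
    {
        static struct toy_vxlan a = { 10, 0 }, b = { 13, 0 };

        vni_list[a.vni % VNI_HASH_SIZE] = &a;   /* bucket 2 */
        vni_list[b.vni % VNI_HASH_SIZE] = &b;   /* bucket 1 */
        toy_exit_net();
        return 0;
    }
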
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 94ef33838bc6..b775769f8322 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
152 152
153 if (!(flags & CMD_ASYNC)) { 153 if (!(flags & CMD_ASYNC)) {
154 cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; 154 cmd.flags |= CMD_WANT_SKB;
155 might_sleep(); 155 might_sleep();
156 } 156 }
157 157
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 10f01793d7a6..81aa91fab5aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f228bb2b844..fbfd2d137117 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
1102 1102
1103/* shared module parameters */ 1103/* shared module parameters */
1104struct iwl_mod_params iwlwifi_mod_params = { 1104struct iwl_mod_params iwlwifi_mod_params = {
1105 .amsdu_size_8K = 1,
1106 .restart_fw = 1, 1105 .restart_fw = 1,
1107 .plcp_check = true, 1106 .plcp_check = true,
1108 .bt_coex_active = true, 1107 .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
1207 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1206 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1208module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1209 int, S_IRUGO); 1208 int, S_IRUGO);
1210MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1211module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); 1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
1212MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1213 1212
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e5e3a79eae2f..2c2a729092f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -91,7 +91,7 @@ enum iwl_power_level {
91 * @sw_crypto: using hardware encryption, default = 0 91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0, 92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants 93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true 96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0 97 * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8c7bec6b9a0b..0cac2b7af78b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
186 * @CMD_ASYNC: Return right away and don't want for the response 186 * @CMD_ASYNC: Return right away and don't want for the response
187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
188 * response. The caller needs to call iwl_free_resp when done. 188 * response. The caller needs to call iwl_free_resp when done.
189 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
190 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
191 * copied. The pointer passed to the response handler is in the transport
192 * ownership and don't need to be freed by the op_mode. This also means
193 * that the pointer is invalidated after the op_mode's handler returns.
194 * @CMD_ON_DEMAND: This command is sent by the test mode pipe. 189 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
195 */ 190 */
196enum CMD_MODE { 191enum CMD_MODE {
197 CMD_SYNC = 0, 192 CMD_SYNC = 0,
198 CMD_ASYNC = BIT(0), 193 CMD_ASYNC = BIT(0),
199 CMD_WANT_SKB = BIT(1), 194 CMD_WANT_SKB = BIT(1),
200 CMD_WANT_HCMD = BIT(2), 195 CMD_ON_DEMAND = BIT(2),
201 CMD_ON_DEMAND = BIT(3),
202}; 196};
203 197
204#define DEF_CMD_PAYLOAD_SIZE 320 198#define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
217 211
218#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 212#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
219 213
220#define IWL_MAX_CMD_TFDS 2 214/*
215 * number of transfer buffers (fragments) per transmit frame descriptor;
216 * this is just the driver's idea, the hardware supports 20
217 */
218#define IWL_MAX_CMD_TBS_PER_TFD 2
221 219
222/** 220/**
223 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command 221 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
254 * @id: id of the host command 252 * @id: id of the host command
255 */ 253 */
256struct iwl_host_cmd { 254struct iwl_host_cmd {
257 const void *data[IWL_MAX_CMD_TFDS]; 255 const void *data[IWL_MAX_CMD_TBS_PER_TFD];
258 struct iwl_rx_packet *resp_pkt; 256 struct iwl_rx_packet *resp_pkt;
259 unsigned long _rx_page_addr; 257 unsigned long _rx_page_addr;
260 u32 _rx_page_order; 258 u32 _rx_page_order;
261 int handler_status; 259 int handler_status;
262 260
263 u32 flags; 261 u32 flags;
264 u16 len[IWL_MAX_CMD_TFDS]; 262 u16 len[IWL_MAX_CMD_TBS_PER_TFD];
265 u8 dataflags[IWL_MAX_CMD_TFDS]; 263 u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
266 u8 id; 264 u8 id;
267}; 265};
268 266
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 23eebda848b0..2adb61f103f4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
762#define IWL_RX_INFO_PHY_CNT 8 762#define IWL_RX_INFO_PHY_CNT 8
763#define IWL_RX_INFO_AGC_IDX 1 763#define IWL_RX_INFO_AGC_IDX 1
764#define IWL_RX_INFO_RSSI_AB_IDX 2 764#define IWL_RX_INFO_RSSI_AB_IDX 2
765#define IWL_RX_INFO_RSSI_C_IDX 3 765#define IWL_OFDM_AGC_A_MSK 0x0000007f
766#define IWL_OFDM_AGC_DB_MSK 0xfe00 766#define IWL_OFDM_AGC_A_POS 0
767#define IWL_OFDM_AGC_DB_POS 9 767#define IWL_OFDM_AGC_B_MSK 0x00003f80
768#define IWL_OFDM_AGC_B_POS 7
769#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
770#define IWL_OFDM_AGC_CODE_POS 20
768#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff 771#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
769#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
770#define IWL_OFDM_RSSI_A_POS 0 772#define IWL_OFDM_RSSI_A_POS 0
773#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
774#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
771#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 775#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
772#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
773#define IWL_OFDM_RSSI_B_POS 16 776#define IWL_OFDM_RSSI_B_POS 16
774#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff 777#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
775#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 778#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
776#define IWL_OFDM_RSSI_C_POS 0
777 779
778/** 780/**
779 * struct iwl_rx_phy_info - phy info 781 * struct iwl_rx_phy_info - phy info
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d3d959db03a9..500f818dba04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -79,17 +79,8 @@
79#define UCODE_VALID_OK cpu_to_le32(0x1) 79#define UCODE_VALID_OK cpu_to_le32(0x1)
80 80
81/* Default calibration values for WkP - set to INIT image w/o running */ 81/* Default calibration values for WkP - set to INIT image w/o running */
82static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
83 0x00, 0x18, 0x00 };
84static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
85 0x7f, 0x7f, 0x7f };
86static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
87static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
88 0x00 };
89static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
90static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; 82static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
91static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; 83static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
92static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
93 84
94struct iwl_calib_default_data { 85struct iwl_calib_default_data {
95 u16 size; 86 u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
99#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} 90#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
100 91
101static const struct iwl_calib_default_data wkp_calib_default_data[12] = { 92static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
102 [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
103 [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
104 [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
105 [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
106 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), 93 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
107 [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
108 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), 94 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
109}; 95};
110 96
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
241 227
242 return 0; 228 return 0;
243} 229}
244#define IWL_HW_REV_ID_RAINBOW 0x2
245#define IWL_PROJ_TYPE_LHP 0x5
246
247static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
248{
249 struct iwl_nvm_data *data = mvm->nvm_data;
250 /* Temp calls to static definitions, will be changed to CSR calls */
251 u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
252 u8 project_type = IWL_PROJ_TYPE_LHP;
253
254 return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
255 (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
256 (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
257}
258 230
259static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) 231static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
260{ 232{
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
262 enum iwl_ucode_type ucode_type = mvm->cur_ucode; 234 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
263 235
264 /* Set parameters */ 236 /* Set parameters */
265 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); 237 phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
266 phy_cfg_cmd.calib_control.event_trigger = 238 phy_cfg_cmd.calib_control.event_trigger =
267 mvm->fw->default_calib[ucode_type].event_trigger; 239 mvm->fw->default_calib[ucode_type].event_trigger;
268 phy_cfg_cmd.calib_control.flow_trigger = 240 phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
275 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 247 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
276} 248}
277 249
278/* Starting with the new PHY DB implementation - New calibs are enabled */
279/* Value - 0x405e7 */
280#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
281 IWL_CALIB_CFG_TEMPERATURE_IDX |\
282 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
283 IWL_CALIB_CFG_DC_IDX |\
284 IWL_CALIB_CFG_BB_FILTER_IDX |\
285 IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
286 IWL_CALIB_CFG_TX_IQ_IDX |\
287 IWL_CALIB_CFG_RX_IQ_IDX |\
288 IWL_CALIB_CFG_AGC_IDX)
289
290#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
291
292/* Value 0x41567 */
293#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
294 IWL_CALIB_CFG_TEMPERATURE_IDX |\
295 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
296 IWL_CALIB_CFG_BB_FILTER_IDX |\
297 IWL_CALIB_CFG_DC_IDX |\
298 IWL_CALIB_CFG_TX_IQ_IDX |\
299 IWL_CALIB_CFG_RX_IQ_IDX |\
300 IWL_CALIB_CFG_SENSITIVITY_IDX |\
301 IWL_CALIB_CFG_AGC_IDX)
302
303#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
304 IWL_CALIB_CFG_TEMPERATURE_IDX |\
305 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
306 IWL_CALIB_CFG_TX_PWR_IDX |\
307 IWL_CALIB_CFG_DC_IDX |\
308 IWL_CALIB_CFG_TX_IQ_IDX |\
309 IWL_CALIB_CFG_SENSITIVITY_IDX)
310
311/*
312 * Sets the calibrations trigger values that will be sent to the FW for runtime
313 * and init calibrations.
314 * The ones given in the FW TLV are not correct.
315 */
316static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
317{
318 struct iwl_tlv_calib_ctrl default_calib;
319
320 /*
321 * WkP FW TLV calib bits are wrong, overwrite them.
322 * This defines the dynamic calibrations which are implemented in the
323 * uCode both for init(flow) calculation and event driven calibs.
324 */
325
326 /* Init Image */
327 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
328 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
329
330 if (default_calib.event_trigger !=
331 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
332 IWL_ERR(mvm,
333 "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
334 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
335 default_calib.event_trigger);
336 if (default_calib.flow_trigger !=
337 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
338 IWL_ERR(mvm,
339 "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
340 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
341 default_calib.flow_trigger);
342
343 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
344 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
345 IWL_ERR(mvm,
346 "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
347 default_calib.event_trigger,
348 default_calib.flow_trigger);
349
350 /* Run time image */
351 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
352 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
353
354 if (default_calib.event_trigger !=
355 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
356 IWL_ERR(mvm,
357 "Updating the event calib for RT image: 0x%x -> 0x%x\n",
358 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
359 default_calib.event_trigger);
360 if (default_calib.flow_trigger !=
361 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
362 IWL_ERR(mvm,
363 "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
364 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
365 default_calib.flow_trigger);
366
367 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
368 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
369 IWL_ERR(mvm,
370 "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
371 default_calib.event_trigger,
372 default_calib.flow_trigger);
373}
374
375static int iwl_set_default_calibrations(struct iwl_mvm *mvm) 250static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
376{ 251{
377 u8 cmd_raw[16]; /* holds the variable size commands */ 252 u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
446 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 321 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
447 WARN_ON(ret); 322 WARN_ON(ret);
448 323
449 /* Override the calibrations from TLV and the const of fw */ 324 /* Send TX valid antennas before triggering calibrations */
450 iwl_set_default_calib_trigger(mvm); 325 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
326 if (ret)
327 goto error;
451 328
452 /* WkP doesn't have all calibrations, need to set default values */ 329 /* WkP doesn't have all calibrations, need to set default values */
453 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 330 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 537711b10478..bdae700c769e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,7 +80,8 @@
80 80
81#define IWL_INVALID_MAC80211_QUEUE 0xff 81#define IWL_INVALID_MAC80211_QUEUE 0xff
82#define IWL_MVM_MAX_ADDRESSES 2 82#define IWL_MVM_MAX_ADDRESSES 2
83#define IWL_RSSI_OFFSET 44 83/* RSSI offset for WkP */
84#define IWL_RSSI_OFFSET 50
84 85
85enum iwl_mvm_tx_fifo { 86enum iwl_mvm_tx_fifo {
86 IWL_MVM_TX_FIFO_BK = 0, 87 IWL_MVM_TX_FIFO_BK = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index aa59adf87db3..d0f9c1e0475e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
624 ieee80211_free_txskb(mvm->hw, skb); 624 ieee80211_free_txskb(mvm->hw, skb);
625} 625}
626 626
627static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) 627static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
628{ 628{
629 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
630
631 iwl_mvm_dump_nic_error_log(mvm);
632
633 iwl_abort_notification_waits(&mvm->notif_wait); 629 iwl_abort_notification_waits(&mvm->notif_wait);
634 630
635 /* 631 /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663 } 659 }
664} 660}
665 661
662static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663{
664 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
665
666 iwl_mvm_dump_nic_error_log(mvm);
667
668 iwl_mvm_nic_restart(mvm);
669}
670
666static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) 671static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
667{ 672{
673 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
674
668 WARN_ON(1); 675 WARN_ON(1);
676 iwl_mvm_nic_restart(mvm);
669} 677}
670 678
671static const struct iwl_op_mode_ops iwl_mvm_ops = { 679static const struct iwl_op_mode_ops iwl_mvm_ops = {
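
In ops.c the recovery logic moves out of the firmware-error callback into a separate iwl_mvm_nic_restart(), and the command-queue-full callback now triggers the same restart instead of only warning. The shape of that refactor, as a small sketch with placeholder names:

    #include <stdio.h>

    /* Shared recovery path, usable from any fatal-condition callback. */
    static void nic_restart(void)
    {
        puts("aborting notification waiters, scheduling firmware restart");
    }

    static void on_nic_error(void)        /* firmware reported an error */
    {
        puts("dumping firmware error log");
        nic_restart();
    }

    static void on_cmd_queue_full(void)   /* host command queue overflowed */
    {
        puts("WARN: command queue full");
        nic_restart();                    /* previously this only warned */
    }

    int main(void)
    {
        on_nic_error();
        on_cmd_queue_full();
        return 0;
    }
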
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3f40ab05bbd8..b0b190d0ec23 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, 131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
132 struct iwl_rx_phy_info *phy_info) 132 struct iwl_rx_phy_info *phy_info)
133{ 133{
134 u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; 134 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
135 int rssi_all_band_a, rssi_all_band_b;
136 u32 agc_a, agc_b, max_agc;
135 u32 val; 137 u32 val;
136 138
137 /* Find max rssi among 3 possible receivers. 139 /* Find max rssi among 2 possible receivers.
138 * These values are measured by the Digital Signal Processor (DSP). 140 * These values are measured by the Digital Signal Processor (DSP).
139 * They should stay fairly constant even as the signal strength varies, 141 * They should stay fairly constant even as the signal strength varies,
140 * if the radio's Automatic Gain Control (AGC) is working right. 142 * if the radio's Automatic Gain Control (AGC) is working right.
141 * AGC value (see below) will provide the "interesting" info. 143 * AGC value (see below) will provide the "interesting" info.
142 */ 144 */
145 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
146 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
147 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
148 max_agc = max_t(u32, agc_a, agc_b);
149
143 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); 150 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
144 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; 151 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
145 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; 152 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
146 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); 153 rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
147 rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; 154 IWL_OFDM_RSSI_ALLBAND_A_POS;
148 155 rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
149 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); 156 IWL_OFDM_RSSI_ALLBAND_B_POS;
150 agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
151 157
152 max_rssi = max_t(u32, rssi_a, rssi_b); 158 /*
153 max_rssi = max_t(u32, max_rssi, rssi_c); 159 * dBm = rssi dB - agc dB - constant.
160 * Higher AGC (higher radio gain) means lower signal.
161 */
162 rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
163 rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
164 max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
154 165
155 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", 166 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
156 rssi_a, rssi_b, rssi_c, max_rssi, agc_db); 167 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
157 168
158 /* dBm = max_rssi dB - agc dB - constant. 169 return max_rssi_dbm;
159 * Higher AGC (higher radio gain) means lower signal. */
160 return max_rssi - agc_db - IWL_RSSI_OFFSET;
161} 170}
162 171
163/* 172/*
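
The reworked iwl_mvm_calc_rssi() computes a per-antenna energy in dBm — in-band RSSI minus that antenna's AGC gain minus the fixed offset (IWL_RSSI_OFFSET, now 50 for this family) — and returns the stronger of chains A and B; the third chain and the combined AGC word are gone. A standalone sketch using the bit layout from the fw-api.h hunk above (the sample register words are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define OFDM_AGC_A_MSK          0x0000007f
    #define OFDM_AGC_A_POS          0
    #define OFDM_AGC_B_MSK          0x00003f80
    #define OFDM_AGC_B_POS          7
    #define OFDM_RSSI_INBAND_A_MSK  0x00ff
    #define OFDM_RSSI_A_POS         0
    #define OFDM_RSSI_INBAND_B_MSK  0xff0000
    #define OFDM_RSSI_B_POS         16
    #define RSSI_OFFSET             50

    static int max_int(int a, int b) { return a > b ? a : b; }

    /* agc_word and rssi_word stand in for the two non_cfg_phy words the
     * firmware reports (IWL_RX_INFO_AGC_IDX / IWL_RX_INFO_RSSI_AB_IDX). */
    static int calc_rssi_dbm(uint32_t agc_word, uint32_t rssi_word)
    {
        int agc_a  = (agc_word  & OFDM_AGC_A_MSK) >> OFDM_AGC_A_POS;
        int agc_b  = (agc_word  & OFDM_AGC_B_MSK) >> OFDM_AGC_B_POS;
        int rssi_a = (rssi_word & OFDM_RSSI_INBAND_A_MSK) >> OFDM_RSSI_A_POS;
        int rssi_b = (rssi_word & OFDM_RSSI_INBAND_B_MSK) >> OFDM_RSSI_B_POS;

        /* dBm = in-band rssi - agc gain - fixed offset; higher gain means
         * the incoming signal was weaker. */
        int a_dbm = rssi_a - RSSI_OFFSET - agc_a;
        int b_dbm = rssi_b - RSSI_OFFSET - agc_b;

        return max_int(a_dbm, b_dbm);
    }

    int main(void)
    {
        /* made-up sample: AGC A=18, B=25; in-band RSSI A=40, B=52 */
        uint32_t agc_word  = (25u << OFDM_AGC_B_POS) | 18u;
        uint32_t rssi_word = (52u << OFDM_RSSI_B_POS) | 40u;

        printf("signal = %d dBm\n", calc_rssi_dbm(agc_word, rssi_word));
        /* chain A: 40 - 50 - 18 = -28; chain B: 52 - 50 - 25 = -23 -> -23 dBm */
        return 0;
    }
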
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 861a7f9f8e7f..274f44e2ef60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
770 u16 txq_id; 770 u16 txq_id;
771 int err; 771 int err;
772 772
773
774 /*
775 * If mac80211 is cleaning its state, then say that we finished since
776 * our state has been cleared anyway.
777 */
778 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
779 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
780 return 0;
781 }
782
773 spin_lock_bh(&mvmsta->lock); 783 spin_lock_bh(&mvmsta->lock);
774 784
775 txq_id = tid_data->txq_id; 785 txq_id = tid_data->txq_id;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6b67ce3f679c..6645efe5c03e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
607 607
608 /* Single frame failure in an AMPDU queue => send BAR */ 608 /* Single frame failure in an AMPDU queue => send BAR */
609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE && 609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
610 !(info->flags & IEEE80211_TX_STAT_ACK)) { 610 !(info->flags & IEEE80211_TX_STAT_ACK))
611 /* there must be only one skb in the skb_list */
612 WARN_ON_ONCE(skb_freed > 1 ||
613 !skb_queue_empty(&skbs));
614 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
615 }
616 612
617 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ 613 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
618 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { 614 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 3d62e8055352..148843e7f34f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
137struct iwl_cmd_meta { 137struct iwl_cmd_meta {
138 /* only for SYNC commands, iff the reply skb is wanted */ 138 /* only for SYNC commands, iff the reply skb is wanted */
139 struct iwl_host_cmd *source; 139 struct iwl_host_cmd *source;
140
141 DEFINE_DMA_UNMAP_ADDR(mapping);
142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags; 140 u32 flags;
145}; 141};
146 142
@@ -185,25 +181,36 @@ struct iwl_queue {
185/* 181/*
186 * The FH will write back to the first TB only, so we need 182 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether 183 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to 184 * it should be mapped or not. This indicates how big the
189 * copy, even for HCMDs it must be big enough to fit the 185 * first TB must be to include the scratch buffer. Since
190 * DRAM scratch from the TX cmd, at least 16 bytes. 186 * the scratch is 4 bytes at offset 12, it's 16 now. If we
187 * make it bigger then allocations will be bigger and copy
188 * slower, so that's probably not useful.
191 */ 189 */
192#define IWL_HCMD_MIN_COPY_SIZE 16 190#define IWL_HCMD_SCRATCHBUF_SIZE 16
193 191
194struct iwl_pcie_txq_entry { 192struct iwl_pcie_txq_entry {
195 struct iwl_device_cmd *cmd; 193 struct iwl_device_cmd *cmd;
196 struct iwl_device_cmd *copy_cmd;
197 struct sk_buff *skb; 194 struct sk_buff *skb;
198 /* buffer to free after command completes */ 195 /* buffer to free after command completes */
199 const void *free_buf; 196 const void *free_buf;
200 struct iwl_cmd_meta meta; 197 struct iwl_cmd_meta meta;
201}; 198};
202 199
200struct iwl_pcie_txq_scratch_buf {
201 struct iwl_cmd_header hdr;
202 u8 buf[8];
203 __le32 scratch;
204};
205
203/** 206/**
204 * struct iwl_txq - Tx Queue for DMA 207 * struct iwl_txq - Tx Queue for DMA
205 * @q: generic Rx/Tx queue descriptor 208 * @q: generic Rx/Tx queue descriptor
206 * @tfds: transmit frame descriptors (DMA memory) 209 * @tfds: transmit frame descriptors (DMA memory)
210 * @scratchbufs: start of command headers, including scratch buffers, for
211 * the writeback -- this is DMA memory and an array holding one buffer
212 * for each command on the queue
213 * @scratchbufs_dma: DMA address for the scratchbufs start
207 * @entries: transmit entries (driver state) 214 * @entries: transmit entries (driver state)
208 * @lock: queue lock 215 * @lock: queue lock
209 * @stuck_timer: timer that fires if queue gets stuck 216 * @stuck_timer: timer that fires if queue gets stuck
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
217struct iwl_txq { 224struct iwl_txq {
218 struct iwl_queue q; 225 struct iwl_queue q;
219 struct iwl_tfd *tfds; 226 struct iwl_tfd *tfds;
227 struct iwl_pcie_txq_scratch_buf *scratchbufs;
228 dma_addr_t scratchbufs_dma;
220 struct iwl_pcie_txq_entry *entries; 229 struct iwl_pcie_txq_entry *entries;
221 spinlock_t lock; 230 spinlock_t lock;
222 struct timer_list stuck_timer; 231 struct timer_list stuck_timer;
@@ -225,6 +234,13 @@ struct iwl_txq {
225 u8 active; 234 u8 active;
226}; 235};
227 236
237static inline dma_addr_t
238iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
239{
240 return txq->scratchbufs_dma +
241 sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
242}
243
228/** 244/**
229 * struct iwl_trans_pcie - PCIe transport specific data 245 * struct iwl_trans_pcie - PCIe transport specific data
230 * @rxq: all the RX queue data 246 * @rxq: all the RX queue data
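
Because all per-slot scratch buffers now come from one coherent DMA allocation, the bus address of slot idx is simply the base address plus idx times the fixed 16-byte struct size — which is all the new iwl_pcie_get_scratchbuf_dma() helper computes. A sketch of the same indexing with an ordinary array standing in for the DMA block (the header field and the bus address below are fabricated):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct scratch_buf {        /* 16 bytes, like IWL_HCMD_SCRATCHBUF_SIZE */
        uint32_t hdr;           /* stand-in for the command header */
        uint8_t  buf[8];
        uint32_t scratch;       /* written back by the device */
    };

    struct txq {
        struct scratch_buf *scratchbufs;  /* CPU address of the shared block */
        uint64_t scratchbufs_dma;         /* bus address of the same block */
    };

    /* Same arithmetic as iwl_pcie_get_scratchbuf_dma(): base + idx * size. */
    static uint64_t scratchbuf_dma(const struct txq *q, int idx)
    {
        return q->scratchbufs_dma + sizeof(struct scratch_buf) * (uint64_t)idx;
    }

    int main(void)
    {
        static struct scratch_buf bufs[256];      /* one per queue slot */
        struct txq q = { bufs, 0x80000000ull };   /* made-up bus address */

        _Static_assert(sizeof(struct scratch_buf) == 16, "must stay 16 bytes");

        printf("slot 5 scratch at bus address 0x%llx\n",
               (unsigned long long)scratchbuf_dma(&q, 5));   /* base + 0x50 */
        return 0;
    }

Keeping the first TB inside this per-slot block is also why the unmap path above can skip TB 0 entirely: it belongs to the coherent allocation and is never dma_map_single()ed.
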
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index b0ae06d2456f..567e67ad1f61 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
637 index = SEQ_TO_INDEX(sequence); 637 index = SEQ_TO_INDEX(sequence);
638 cmd_index = get_cmd_index(&txq->q, index); 638 cmd_index = get_cmd_index(&txq->q, index);
639 639
640 if (reclaim) { 640 if (reclaim)
641 struct iwl_pcie_txq_entry *ent; 641 cmd = txq->entries[cmd_index].cmd;
642 ent = &txq->entries[cmd_index]; 642 else
643 cmd = ent->copy_cmd;
644 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
645 } else {
646 cmd = NULL; 643 cmd = NULL;
647 }
648 644
649 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 645 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
650 646
651 if (reclaim) { 647 if (reclaim) {
652 /* The original command isn't needed any more */
653 kfree(txq->entries[cmd_index].copy_cmd);
654 txq->entries[cmd_index].copy_cmd = NULL;
655 /* nor is the duplicated part of the command */
656 kfree(txq->entries[cmd_index].free_buf); 648 kfree(txq->entries[cmd_index].free_buf);
657 txq->entries[cmd_index].free_buf = NULL; 649 txq->entries[cmd_index].free_buf = NULL;
658 } 650 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f5685..8595c16f74de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
191 } 191 }
192 192
193 for (i = q->read_ptr; i != q->write_ptr; 193 for (i = q->read_ptr; i != q->write_ptr;
194 i = iwl_queue_inc_wrap(i, q->n_bd)) { 194 i = iwl_queue_inc_wrap(i, q->n_bd))
195 struct iwl_tx_cmd *tx_cmd =
196 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
197 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 195 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
198 get_unaligned_le32(&tx_cmd->scratch)); 196 le32_to_cpu(txq->scratchbufs[i].scratch));
199 }
200 197
201 iwl_op_mode_nic_error(trans->op_mode); 198 iwl_op_mode_nic_error(trans->op_mode);
202} 199}
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
367} 364}
368 365
369static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 366static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
370 struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, 367 struct iwl_cmd_meta *meta,
371 enum dma_data_direction dma_dir) 368 struct iwl_tfd *tfd)
372{ 369{
373 int i; 370 int i;
374 int num_tbs; 371 int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
382 return; 379 return;
383 } 380 }
384 381
385 /* Unmap tx_cmd */ 382 /* first TB is never freed - it's the scratchbuf data */
386 if (num_tbs)
387 dma_unmap_single(trans->dev,
388 dma_unmap_addr(meta, mapping),
389 dma_unmap_len(meta, len),
390 DMA_BIDIRECTIONAL);
391 383
392 /* Unmap chunks, if any. */
393 for (i = 1; i < num_tbs; i++) 384 for (i = 1; i < num_tbs; i++)
394 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), 385 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
395 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); 386 iwl_pcie_tfd_tb_get_len(tfd, i),
387 DMA_TO_DEVICE);
396 388
397 tfd->num_tbs = 0; 389 tfd->num_tbs = 0;
398} 390}
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
406 * Does NOT advance any TFD circular buffer read/write indexes 398 * Does NOT advance any TFD circular buffer read/write indexes
407 * Does NOT free the TFD itself (which is within circular buffer) 399 * Does NOT free the TFD itself (which is within circular buffer)
408 */ 400 */
409static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 401static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
410 enum dma_data_direction dma_dir)
411{ 402{
412 struct iwl_tfd *tfd_tmp = txq->tfds; 403 struct iwl_tfd *tfd_tmp = txq->tfds;
413 404
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
418 lockdep_assert_held(&txq->lock); 409 lockdep_assert_held(&txq->lock);
419 410
420 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 411 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
421 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], 412 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
422 dma_dir);
423 413
424 /* free SKB */ 414 /* free SKB */
425 if (txq->entries) { 415 if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
479{ 469{
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 471 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
472 size_t scratchbuf_sz;
482 int i; 473 int i;
483 474
484 if (WARN_ON(txq->entries || txq->tfds)) 475 if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
514 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 505 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
515 goto error; 506 goto error;
516 } 507 }
508
509 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
510 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
511 sizeof(struct iwl_cmd_header) +
512 offsetof(struct iwl_tx_cmd, scratch));
513
514 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
515
516 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
517 &txq->scratchbufs_dma,
518 GFP_KERNEL);
519 if (!txq->scratchbufs)
520 goto err_free_tfds;
521
517 txq->q.id = txq_id; 522 txq->q.id = txq_id;
518 523
519 return 0; 524 return 0;
525err_free_tfds:
526 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
520error: 527error:
521 if (txq->entries && txq_id == trans_pcie->cmd_queue) 528 if (txq->entries && txq_id == trans_pcie->cmd_queue)
522 for (i = 0; i < slots_num; i++) 529 for (i = 0; i < slots_num; i++)
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
565 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 572 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
566 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 573 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
567 struct iwl_queue *q = &txq->q; 574 struct iwl_queue *q = &txq->q;
568 enum dma_data_direction dma_dir;
569 575
570 if (!q->n_bd) 576 if (!q->n_bd)
571 return; 577 return;
572 578
573 /* In the command queue, all the TBs are mapped as BIDI
574 * so unmap them as such.
575 */
576 if (txq_id == trans_pcie->cmd_queue)
577 dma_dir = DMA_BIDIRECTIONAL;
578 else
579 dma_dir = DMA_TO_DEVICE;
580
581 spin_lock_bh(&txq->lock); 579 spin_lock_bh(&txq->lock);
582 while (q->write_ptr != q->read_ptr) { 580 while (q->write_ptr != q->read_ptr) {
583 iwl_pcie_txq_free_tfd(trans, txq, dma_dir); 581 iwl_pcie_txq_free_tfd(trans, txq);
584 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 582 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
585 } 583 }
586 spin_unlock_bh(&txq->lock); 584 spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
610 if (txq_id == trans_pcie->cmd_queue) 608 if (txq_id == trans_pcie->cmd_queue)
611 for (i = 0; i < txq->q.n_window; i++) { 609 for (i = 0; i < txq->q.n_window; i++) {
612 kfree(txq->entries[i].cmd); 610 kfree(txq->entries[i].cmd);
613 kfree(txq->entries[i].copy_cmd);
614 kfree(txq->entries[i].free_buf); 611 kfree(txq->entries[i].free_buf);
615 } 612 }
616 613
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
619 dma_free_coherent(dev, sizeof(struct iwl_tfd) * 616 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
620 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 617 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
621 txq->q.dma_addr = 0; 618 txq->q.dma_addr = 0;
619
620 dma_free_coherent(dev,
621 sizeof(*txq->scratchbufs) * txq->q.n_window,
622 txq->scratchbufs, txq->scratchbufs_dma);
622 } 623 }
623 624
624 kfree(txq->entries); 625 kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
962 963
963 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 964 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
964 965
965 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 966 iwl_pcie_txq_free_tfd(trans, txq);
966 } 967 }
967 968
968 iwl_pcie_txq_progress(trans_pcie, txq); 969 iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1153 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1154 dma_addr_t phys_addr;
1154 int idx; 1155 int idx;
1155 u16 copy_size, cmd_size, dma_size; 1156 u16 copy_size, cmd_size, scratch_size;
1156 bool had_nocopy = false; 1157 bool had_nocopy = false;
1157 int i; 1158 int i;
1158 u32 cmd_pos; 1159 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS]; 1160 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS]; 1161 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1161 1162
1162 copy_size = sizeof(out_cmd->hdr); 1163 copy_size = sizeof(out_cmd->hdr);
1163 cmd_size = sizeof(out_cmd->hdr); 1164 cmd_size = sizeof(out_cmd->hdr);
1164 1165
1165 /* need one for the header if the first is NOCOPY */ 1166 /* need one for the header if the first is NOCOPY */
1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1167 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1167 1168
1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1169 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1169 cmddata[i] = cmd->data[i]; 1170 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i]; 1171 cmdlen[i] = cmd->len[i];
1171 1172
1172 if (!cmd->len[i]) 1173 if (!cmd->len[i])
1173 continue; 1174 continue;
1174 1175
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1176 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1177 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1178 int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1178 1179
1179 if (copy > cmdlen[i]) 1180 if (copy > cmdlen[i])
1180 copy = cmdlen[i]; 1181 copy = cmdlen[i];
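The loop above guarantees that at least IWL_HCMD_SCRATCHBUF_SIZE bytes of the command end up in the copied header area, even when the first fragments are marked NOCOPY. A small sketch of the same size bookkeeping, with MIN_COPY standing in for the driver's constant (the value here is illustrative):

#define MIN_COPY 16	/* illustrative, not the driver's actual value */

/* How many bytes of this fragment must be copied so that the first
 * MIN_COPY bytes of the command always land in the copied area. */
static unsigned int bytes_to_copy(unsigned int copied_so_far,
				  unsigned int fragment_len)
{
	unsigned int copy = 0;

	if (copied_so_far < MIN_COPY) {
		copy = MIN_COPY - copied_so_far;
		if (copy > fragment_len)
			copy = fragment_len;
	}

	return copy;
}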
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1260 /* and copy the data that needs to be copied */ 1261 /* and copy the data that needs to be copied */
1261 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1262 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr); 1263 copy_size = sizeof(out_cmd->hdr);
1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1264 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1264 int copy = 0; 1265 int copy = 0;
1265 1266
1266 if (!cmd->len) 1267 if (!cmd->len)
1267 continue; 1268 continue;
1268 1269
1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1270 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1271 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1272 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1272 1273
1273 if (copy > cmd->len[i]) 1274 if (copy > cmd->len[i])
1274 copy = cmd->len[i]; 1275 copy = cmd->len[i];
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		}
 	}
 
-	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
-
-	/*
-	 * since out_cmd will be the source address of the FH, it will write
-	 * the retry count there. So when the user needs to receivce the HCMD
-	 * that corresponds to the response in the response handler, it needs
-	 * to set CMD_WANT_HCMD.
-	 */
-	if (cmd->flags & CMD_WANT_HCMD) {
-		txq->entries[idx].copy_cmd =
-			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
-		if (unlikely(!txq->entries[idx].copy_cmd)) {
-			idx = -ENOMEM;
-			goto out;
-		}
-	}
-
 	IWL_DEBUG_HC(trans,
 		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-	/*
-	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
-	 * still map at least that many bytes for the hardware to write back to.
-	 * We have enough space, so that's not a problem.
-	 */
-	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+	/* start the TFD with the scratchbuf */
+	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
+	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
+	iwl_pcie_txq_build_tfd(trans, txq,
+			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
+			       scratch_size, 1);
 
-	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
-				   DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-		idx = -ENOMEM;
-		goto out;
-	}
+	/* map first command fragment, if any remains */
+	if (copy_size > scratch_size) {
+		phys_addr = dma_map_single(trans->dev,
+					   ((u8 *)&out_cmd->hdr) + scratch_size,
+					   copy_size - scratch_size,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			idx = -ENOMEM;
+			goto out;
+		}
 
-	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, dma_size);
-
-	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+				       copy_size - scratch_size, 0);
+	}
 
 	/* map the remaining (adjusted) nocopy/dup fragments */
-	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		const void *data = cmddata[i];
 
 		if (!cmdlen[i])
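After the rewrite, a host command TFD always starts with a TB taken from the per-slot coherent scratch area, and only the copied bytes beyond that are streamed with dma_map_single(). A rough sketch of the idea with hypothetical names (foo_tb, foo_attach_cmd); the real driver records TBs through iwl_pcie_txq_build_tfd():

#include <linux/dma-mapping.h>
#include <linux/string.h>

struct foo_tb {
	dma_addr_t	addr;
	u16		len;
};

/* Attach up to two TBs for a copied command: TB0 from coherent scratch
 * memory, TB1 (optional) streamed from the command buffer itself.
 * Returns the number of TBs used, or -ENOMEM on a mapping failure. */
static int foo_attach_cmd(struct device *dev, void *cmd, u16 copy_size,
			  void *scratch_cpu, dma_addr_t scratch_dma,
			  u16 scratch_size, struct foo_tb tbs[2])
{
	dma_addr_t phys;

	memcpy(scratch_cpu, cmd, scratch_size);
	tbs[0].addr = scratch_dma;		/* TB0: coherent */
	tbs[0].len = scratch_size;

	if (copy_size <= scratch_size)
		return 1;

	phys = dma_map_single(dev, (u8 *)cmd + scratch_size,
			      copy_size - scratch_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, phys))
		return -ENOMEM;

	tbs[1].addr = phys;			/* TB1: streamed */
	tbs[1].len = copy_size - scratch_size;
	return 2;
}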
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
 		phys_addr = dma_map_single(trans->dev, (void *)data,
-					   cmdlen[i], DMA_BIDIRECTIONAL);
+					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr],
-					   DMA_BIDIRECTIONAL);
+					   &txq->tfds[q->write_ptr]);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1418 cmd = txq->entries[cmd_index].cmd; 1406 cmd = txq->entries[cmd_index].cmd;
1419 meta = &txq->entries[cmd_index].meta; 1407 meta = &txq->entries[cmd_index].meta;
1420 1408
1421 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 1409 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
1422 1410
1423 /* Input error checking is done when commands are added to queue. */ 1411 /* Input error checking is done when commands are added to queue. */
1424 if (meta->flags & CMD_WANT_SKB) { 1412 if (meta->flags & CMD_WANT_SKB) {
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
 	struct iwl_queue *q;
-	dma_addr_t phys_addr = 0;
-	dma_addr_t txcmd_phys;
-	dma_addr_t scratch_phys;
-	u16 len, firstlen, secondlen;
+	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+	void *tb1_addr;
+	u16 len, tb1_len, tb2_len;
 	u8 wait_write_ptr = 0;
 	__le16 fc = hdr->frame_control;
 	u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
 				INDEX_TO_SEQ(q->write_ptr)));
 
+	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
+	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+		       offsetof(struct iwl_tx_cmd, scratch);
+
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
 
 	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
 	 */
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-	firstlen = (len + 3) & ~3;
+	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
+	tb1_len = (len + 3) & ~3;
 
 	/* Tell NIC about any 2-byte padding after MAC header */
-	if (firstlen != len)
+	if (tb1_len != len)
 		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
 
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = dma_map_single(trans->dev,
-				    &dev_cmd->hdr, firstlen,
-				    DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
-		goto out_err;
-	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	dma_unmap_len_set(out_meta, len, firstlen);
+	/* The first TB points to the scratchbuf data - min_copy bytes */
+	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
+	       IWL_HCMD_SCRATCHBUF_SIZE);
+	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+			       IWL_HCMD_SCRATCHBUF_SIZE, 1);
 
-	if (!ieee80211_has_morefrags(fc)) {
-		txq->need_update = 1;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
+	/* there must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
 
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	secondlen = skb->len - hdr_len;
-	if (secondlen > 0) {
-		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
-					   secondlen, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
-			dma_unmap_single(trans->dev,
-					 dma_unmap_addr(out_meta, mapping),
-					 dma_unmap_len(out_meta, len),
-					 DMA_BIDIRECTIONAL);
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
+	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+		goto out_err;
+	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
+
+	/*
+	 * Set up TFD's third entry to point directly to remainder
+	 * of skb, if any (802.11 null frames have no payload).
+	 */
+	tb2_len = skb->len - hdr_len;
+	if (tb2_len > 0) {
+		dma_addr_t tb2_phys = dma_map_single(trans->dev,
+						     skb->data + hdr_len,
+						     tb2_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
 			goto out_err;
 		}
+		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
 	}
 
-	/* Attach buffers to TFD */
-	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
-	if (secondlen > 0)
-		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
-
-	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-		       offsetof(struct iwl_tx_cmd, scratch);
-
-	/* take back ownership of DMA buffer to enable update */
-	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
-				DMA_BIDIRECTIONAL);
-	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
-	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
-				   DMA_BIDIRECTIONAL);
-
 	trace_iwlwifi_dev_tx(trans->dev, skb,
 			     &txq->tfds[txq->q.write_ptr],
 			     sizeof(struct iwl_tfd),
-			     &dev_cmd->hdr, firstlen,
-			     skb->data + hdr_len, secondlen);
+			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
+			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, secondlen);
+				  skb->data + hdr_len, tb2_len);
+
+	if (!ieee80211_has_morefrags(fc)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
 
 	/* start timer if queue currently empty */
 	if (txq->need_update && q->read_ptr == q->write_ptr &&
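With the scratch buffers in place, a data frame is described by three TBs instead of one bidirectional mapping: TB0 comes from coherent scratch memory, TB1 covers the rest of the TX command plus the 802.11 header (dword aligned), and TB2 points at the frame payload. A sketch of the TB1 length calculation only; SCRATCH_SIZE is an illustrative stand-in for IWL_HCMD_SCRATCHBUF_SIZE, not the driver's real value:

#include <linux/types.h>

#define SCRATCH_SIZE	20	/* illustrative stand-in */

/* Length of TB1: whatever of the device command and the 802.11 header
 * was not already covered by TB0, rounded up to a dword boundary
 * because the device reads in 4-byte units. */
static u16 foo_tb1_len(u16 cmd_hdr_len, u16 tx_cmd_len, u8 hdr_len)
{
	u16 len = cmd_hdr_len + tx_cmd_len + hdr_len - SCRATCH_SIZE;

	return (len + 3) & ~3;
}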
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 445ffda715ad..7c12d9c2b230 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -276,6 +276,7 @@ static struct file_system_type oprofilefs_type = {
276 .mount = oprofilefs_mount, 276 .mount = oprofilefs_mount,
277 .kill_sb = kill_litter_super, 277 .kill_sb = kill_litter_super,
278}; 278};
279MODULE_ALIAS_FS("oprofilefs");
279 280
280 281
281int __init oprofilefs_register(void) 282int __init oprofilefs_register(void)
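MODULE_ALIAS_FS() registers an "fs-<name>" module alias, so mounting the filesystem by name can autoload the module that provides it. A minimal sketch for a hypothetical examplefs (the mount callback is declared but its body is omitted):

#include <linux/fs.h>
#include <linux/module.h>

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data);	/* body omitted */

static struct file_system_type examplefs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("examplefs");	/* expands to MODULE_ALIAS("fs-examplefs") */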
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 39c937f9b426..dee5dddaa292 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -331,8 +331,14 @@ static void pci_acpi_cleanup(struct device *dev)
331 } 331 }
332} 332}
333 333
334static bool pci_acpi_bus_match(struct device *dev)
335{
336 return dev->bus == &pci_bus_type;
337}
338
 static struct acpi_bus_type acpi_pci_bus = {
-	.bus = &pci_bus_type,
+	.name = "PCI",
+	.match = pci_acpi_bus_match,
 	.find_device = acpi_pci_find_device,
 	.setup = pci_acpi_setup,
 	.cleanup = pci_acpi_cleanup,
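The acpi_bus_type glue no longer dereferences a struct bus_type pointer; each user supplies a name and a match() callback that recognizes its own devices. A sketch of a registration under those assumptions, with hypothetical foo_bus_type and foo_acpi_find_device:

#include <acpi/acpi_bus.h>

static bool foo_acpi_bus_match(struct device *dev)
{
	return dev->bus == &foo_bus_type;	/* foo_bus_type is hypothetical */
}

static struct acpi_bus_type foo_acpi_bus = {
	.name		= "FOO",
	.match		= foo_acpi_bus_match,
	.find_device	= foo_acpi_find_device,	/* hypothetical */
};

static int __init foo_acpi_init(void)
{
	return register_acpi_bus_type(&foo_acpi_bus);
}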
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
index 93d66809355a..3e5b4497a1d0 100644
--- a/drivers/platform/x86/chromeos_laptop.c
+++ b/drivers/platform/x86/chromeos_laptop.c
@@ -23,6 +23,9 @@
23 23
24#include <linux/dmi.h> 24#include <linux/dmi.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/i2c/atmel_mxt_ts.h>
27#include <linux/input.h>
28#include <linux/interrupt.h>
26#include <linux/module.h> 29#include <linux/module.h>
27 30
28#define ATMEL_TP_I2C_ADDR 0x4b 31#define ATMEL_TP_I2C_ADDR 0x4b
@@ -67,15 +70,49 @@ static struct i2c_board_info __initdata tsl2563_als_device = {
67 I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR), 70 I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
68}; 71};
69 72
73static struct mxt_platform_data atmel_224s_tp_platform_data = {
74 .x_line = 18,
75 .y_line = 12,
76 .x_size = 102*20,
77 .y_size = 68*20,
78 .blen = 0x80, /* Gain setting is in upper 4 bits */
79 .threshold = 0x32,
80 .voltage = 0, /* 3.3V */
81 .orient = MXT_VERTICAL_FLIP,
82 .irqflags = IRQF_TRIGGER_FALLING,
83 .is_tp = true,
84 .key_map = { KEY_RESERVED,
85 KEY_RESERVED,
86 KEY_RESERVED,
87 BTN_LEFT },
88 .config = NULL,
89 .config_length = 0,
90};
91
 static struct i2c_board_info __initdata atmel_224s_tp_device = {
 	I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
-	.platform_data = NULL,
+	.platform_data = &atmel_224s_tp_platform_data,
 	.flags = I2C_CLIENT_WAKE,
 };
75 97
98static struct mxt_platform_data atmel_1664s_platform_data = {
99 .x_line = 32,
100 .y_line = 50,
101 .x_size = 1700,
102 .y_size = 2560,
103 .blen = 0x89, /* Gain setting is in upper 4 bits */
104 .threshold = 0x28,
105 .voltage = 0, /* 3.3V */
106 .orient = MXT_ROTATED_90_COUNTER,
107 .irqflags = IRQF_TRIGGER_FALLING,
108 .is_tp = false,
109 .config = NULL,
110 .config_length = 0,
111};
112
 static struct i2c_board_info __initdata atmel_1664s_device = {
 	I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
-	.platform_data = NULL,
+	.platform_data = &atmel_1664s_platform_data,
 	.flags = I2C_CLIENT_WAKE,
 };
81 118
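The platform data structs above reach the touch controller driver through the i2c_board_info the device is instantiated with. A generic sketch of that mechanism, using a made-up example_touch device and pdata rather than the chromeos_laptop specifics:

#include <linux/i2c.h>
#include <linux/interrupt.h>

struct example_pdata {
	unsigned long	irqflags;
	bool		is_tp;
};

static struct example_pdata example_touch_pdata = {
	.irqflags	= IRQF_TRIGGER_FALLING,
	.is_tp		= true,
};

static struct i2c_board_info __initdata example_touch_device = {
	I2C_BOARD_INFO("example_touch", 0x4a),
	.platform_data	= &example_touch_pdata,
	.flags		= I2C_CLIENT_WAKE,
};

static int __init example_board_init(void)
{
	/* attach the device description to I2C adapter 0 */
	return i2c_register_board_info(0, &example_touch_device, 1);
}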
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 8813fc03aa09..55cd459a3908 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -353,8 +353,14 @@ static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
353/* complete initialization of a PNPACPI device includes having 353/* complete initialization of a PNPACPI device includes having
354 * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling. 354 * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
355 */ 355 */
356static bool acpi_pnp_bus_match(struct device *dev)
357{
358 return dev->bus == &pnp_bus_type;
359}
360
 static struct acpi_bus_type __initdata acpi_pnp_bus = {
-	.bus = &pnp_bus_type,
+	.name = "PNP",
+	.match = acpi_pnp_bus_match,
 	.find_device = acpi_pnp_find_device,
 };
360 366
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index da9782bd27d0..e3661c20cf38 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2830,7 +2830,7 @@ EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
2830 * regulator_allow_bypass - allow the regulator to go into bypass mode 2830 * regulator_allow_bypass - allow the regulator to go into bypass mode
2831 * 2831 *
2832 * @regulator: Regulator to configure 2832 * @regulator: Regulator to configure
- * @allow: enable or disable bypass mode
+ * @enable: enable or disable bypass mode
2834 * 2834 *
2835 * Allow the regulator to go into bypass mode if all other consumers 2835 * Allow the regulator to go into bypass mode if all other consumers
2836 * for the regulator also enable bypass mode and the machine 2836 * for the regulator also enable bypass mode and the machine
@@ -3057,9 +3057,13 @@ int regulator_bulk_enable(int num_consumers,
3057 return 0; 3057 return 0;
3058 3058
 err:
-	pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
-	while (--i >= 0)
-		regulator_disable(consumers[i].consumer);
+	for (i = 0; i < num_consumers; i++) {
+		if (consumers[i].ret < 0)
+			pr_err("Failed to enable %s: %d\n", consumers[i].supply,
+			       consumers[i].ret);
+		else
+			regulator_disable(consumers[i].consumer);
+	}
3063 3067
3064 return ret; 3068 return ret;
3065} 3069}
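With this change the error path walks every consumer, reports each one whose recorded ret is negative, and disables only those that actually switched on. From the consumer side the bulk API is typically used as in the sketch below (the supply names and device are illustrative):

#include <linux/regulator/consumer.h>

static struct regulator_bulk_data example_supplies[] = {
	{ .supply = "vdd" },
	{ .supply = "vio" },
};

static int example_power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(example_supplies),
				      example_supplies);
	if (ret)
		return ret;

	/* enables all supplies, or rolls back and reports the failures */
	return regulator_bulk_enable(ARRAY_SIZE(example_supplies),
				     example_supplies);
}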
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 219d162b651e..a53c11a529d5 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -528,7 +528,7 @@ static int db8500_regulator_probe(struct platform_device *pdev)
528 return 0; 528 return 0;
529} 529}
530 530
-static int __exit db8500_regulator_remove(struct platform_device *pdev)
+static int db8500_regulator_remove(struct platform_device *pdev)
532{ 532{
533 int i; 533 int i;
534 534
@@ -553,7 +553,7 @@ static struct platform_driver db8500_regulator_driver = {
553 .owner = THIS_MODULE, 553 .owner = THIS_MODULE,
554 }, 554 },
555 .probe = db8500_regulator_probe, 555 .probe = db8500_regulator_probe,
-	.remove = __exit_p(db8500_regulator_remove),
+	.remove = db8500_regulator_remove,
557}; 557};
558 558
559static int __init db8500_regulator_init(void) 559static int __init db8500_regulator_init(void)
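Dropping __exit/__exit_p here matters because a .remove() handler must stay available even in a built-in driver: the device can be unbound at runtime (for example via sysfs), long after the __exit section would have been discarded. A minimal sketch of the resulting driver shape, with hypothetical example_probe/example_remove:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* undo whatever probe set up */
	return 0;
}

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
	.remove	= example_remove,	/* kept out of __exit on purpose */
};
module_platform_driver(example_driver);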
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index cde13bb5a8fb..39cf14606784 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -4,6 +4,7 @@
4 * Copyright 2011-2012 Texas Instruments Inc. 4 * Copyright 2011-2012 Texas Instruments Inc.
5 * 5 *
6 * Author: Graeme Gregory <gg@slimlogic.co.uk> 6 * Author: Graeme Gregory <gg@slimlogic.co.uk>
7 * Author: Ian Lartey <ian@slimlogic.co.uk>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
@@ -156,7 +157,7 @@ static const struct regs_info palmas_regs_info[] = {
156 * 157 *
157 * So they are basically (maxV-minV)/stepV 158 * So they are basically (maxV-minV)/stepV
158 */ 159 */
-#define PALMAS_SMPS_NUM_VOLTAGES	116
+#define PALMAS_SMPS_NUM_VOLTAGES	117
160#define PALMAS_SMPS10_NUM_VOLTAGES 2 161#define PALMAS_SMPS10_NUM_VOLTAGES 2
161#define PALMAS_LDO_NUM_VOLTAGES 50 162#define PALMAS_LDO_NUM_VOLTAGES 50
162 163
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 74508cc62d67..f705d25b437c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -471,24 +471,23 @@ twl4030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
 						  selector);
 }
 
-static int twl4030ldo_get_voltage(struct regulator_dev *rdev)
+static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct twlreg_info *info = rdev_get_drvdata(rdev);
-	int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
-			       VREG_VOLTAGE);
+	int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
 
 	if (vsel < 0)
 		return vsel;
 
 	vsel &= info->table_len - 1;
-	return LDO_MV(info->table[vsel]) * 1000;
+	return vsel;
 }
 
 static struct regulator_ops twl4030ldo_ops = {
 	.list_voltage = twl4030ldo_list_voltage,
 
 	.set_voltage_sel = twl4030ldo_set_voltage_sel,
-	.get_voltage = twl4030ldo_get_voltage,
+	.get_voltage_sel = twl4030ldo_get_voltage_sel,
 
 	.enable = twl4030reg_enable,
 	.disable = twl4030reg_disable,
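Switching to .get_voltage_sel lets the regulator core translate the raw selector through .list_voltage instead of the driver duplicating the table lookup. A sketch of that contract with an invented voltage table and a hypothetical register-read helper:

#include <linux/regulator/driver.h>

static const unsigned int example_table_uV[] = {
	1200000, 1500000, 1800000, 2500000,
};

static int example_read_vsel_reg(struct regulator_dev *rdev);	/* hypothetical */

static int example_list_voltage(struct regulator_dev *rdev, unsigned sel)
{
	if (sel >= ARRAY_SIZE(example_table_uV))
		return -EINVAL;
	return example_table_uV[sel];
}

static int example_get_voltage_sel(struct regulator_dev *rdev)
{
	int val = example_read_vsel_reg(rdev);

	if (val < 0)
		return val;
	return val & (ARRAY_SIZE(example_table_uV) - 1);
}

static struct regulator_ops example_ops = {
	.list_voltage	 = example_list_voltage,
	.get_voltage_sel = example_get_voltage_sel,
};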
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 765398c063c7..c31187d79343 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -71,9 +71,14 @@ struct kmem_cache *scsi_sdb_cache;
71#ifdef CONFIG_ACPI 71#ifdef CONFIG_ACPI
72#include <acpi/acpi_bus.h> 72#include <acpi/acpi_bus.h>
73 73
74static bool acpi_scsi_bus_match(struct device *dev)
75{
76 return dev->bus == &scsi_bus_type;
77}
78
74int scsi_register_acpi_bus_type(struct acpi_bus_type *bus) 79int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
75{ 80{
-	bus->bus = &scsi_bus_type;
+	bus->match = acpi_scsi_bus_match;
77 return register_acpi_bus_type(bus); 82 return register_acpi_bus_type(bus);
78} 83}
79EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type); 84EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
diff --git a/drivers/staging/ccg/f_fs.c b/drivers/staging/ccg/f_fs.c
index 8adc79d1b402..f6373dade7fb 100644
--- a/drivers/staging/ccg/f_fs.c
+++ b/drivers/staging/ccg/f_fs.c
@@ -1223,6 +1223,7 @@ static struct file_system_type ffs_fs_type = {
1223 .mount = ffs_fs_mount, 1223 .mount = ffs_fs_mount,
1224 .kill_sb = ffs_fs_kill_sb, 1224 .kill_sb = ffs_fs_kill_sb,
1225}; 1225};
1226MODULE_ALIAS_FS("functionfs");
1226 1227
1227 1228
1228/* Driver's main init/cleanup functions *************************************/ 1229/* Driver's main init/cleanup functions *************************************/
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index cef4252bb31a..b6f4bad3f756 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -210,9 +210,14 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
210 return 0; 210 return 0;
211} 211}
212 212
213static bool usb_acpi_bus_match(struct device *dev)
214{
215 return is_usb_device(dev) || is_usb_port(dev);
216}
217
 static struct acpi_bus_type usb_acpi_bus = {
-	.bus = &usb_bus_type,
-	.find_bridge = usb_acpi_find_device,
+	.name = "USB",
+	.match = usb_acpi_bus_match,
 	.find_device = usb_acpi_find_device,
 };
218 223
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 38388d7844fc..c377ff84bf2c 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1235,6 +1235,7 @@ static struct file_system_type ffs_fs_type = {
1235 .mount = ffs_fs_mount, 1235 .mount = ffs_fs_mount,
1236 .kill_sb = ffs_fs_kill_sb, 1236 .kill_sb = ffs_fs_kill_sb,
1237}; 1237};
1238MODULE_ALIAS_FS("functionfs");
1238 1239
1239 1240
1240/* Driver's main init/cleanup functions *************************************/ 1241/* Driver's main init/cleanup functions *************************************/
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 8ac840f25ba9..e2b2e9cf254a 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2105,6 +2105,7 @@ static struct file_system_type gadgetfs_type = {
2105 .mount = gadgetfs_mount, 2105 .mount = gadgetfs_mount,
2106 .kill_sb = gadgetfs_kill_sb, 2106 .kill_sb = gadgetfs_kill_sb,
2107}; 2107};
2108MODULE_ALIAS_FS("gadgetfs");
2108 2109
2109/*----------------------------------------------------------------------*/ 2110/*----------------------------------------------------------------------*/
2110 2111
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index ec0abb6df3c3..71679875f056 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -75,6 +75,7 @@ static struct file_system_type xenfs_type = {
75 .mount = xenfs_mount, 75 .mount = xenfs_mount,
76 .kill_sb = kill_litter_super, 76 .kill_sb = kill_litter_super,
77}; 77};
78MODULE_ALIAS_FS("xenfs");
78 79
79static int __init xenfs_init(void) 80static int __init xenfs_init(void)
80{ 81{