aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>2015-01-14 18:39:23 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2015-01-14 18:39:23 -0500
commit919ba4ee1badf6247f8064bdec2c80e389b57c40 (patch)
treee18a358f60e67901093f91a300bbf7b7a6e4d543 /drivers
parent06087cb42dc1da096c37f51ea12ad676f00569a9 (diff)
parent04f9c6e6d17584340fb6c8a9469a0e6df28876d2 (diff)
Merge tag 'usb-serial-3.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial into usb-linus
Johan writes: USB-serial fixes for v3.19-rc5 Here are a few fixes for reported problems including a possible null-deref on probe with keyspan, a misbehaving modem, and a couple of issues with the USB console. Some new device IDs are also added. Signed-off-by: Johan Hovold <johan@kernel.org>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/agp/nvidia-agp.c2
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c46
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c8
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h15
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c60
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c18
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c28
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c69
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c53
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/tegra/dc.c48
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/gem.c52
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c2
-rw-r--r--drivers/usb/serial/console.c16
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/generic.c4
-rw-r--r--drivers/usb/serial/keyspan.c20
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/serial/qcserial.c1
54 files changed, 506 insertions, 259 deletions
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db03667650..dcbbb4ea3cc1 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
417module_init(agp_ali_init); 417module_init(agp_ali_init);
418module_exit(agp_ali_cleanup); 418module_exit(agp_ali_cleanup);
419 419
420MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 420MODULE_AUTHOR("Dave Jones");
421MODULE_LICENSE("GPL and additional rights"); 421MODULE_LICENSE("GPL and additional rights");
422 422
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0310e1..0ef350010766 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
813module_init(agp_amd64_mod_init); 813module_init(agp_amd64_mod_init);
814module_exit(agp_amd64_cleanup); 814module_exit(agp_amd64_cleanup);
815 815
816MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen"); 816MODULE_AUTHOR("Dave Jones, Andi Kleen");
817module_param(agp_try_unsupported, bool, 0); 817module_param(agp_try_unsupported, bool, 0);
818MODULE_LICENSE("GPL"); 818MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6baa304..75a9786a77e6 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
579module_init(agp_ati_init); 579module_init(agp_ati_init);
580module_exit(agp_ati_cleanup); 580module_exit(agp_ati_cleanup);
581 581
582MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 582MODULE_AUTHOR("Dave Jones");
583MODULE_LICENSE("GPL and additional rights"); 583MODULE_LICENSE("GPL and additional rights");
584 584
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce8328..38ffb281df97 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
356__setup("agp=", agp_setup); 356__setup("agp=", agp_setup);
357#endif 357#endif
358 358
359MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 359MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
360MODULE_DESCRIPTION("AGP GART driver"); 360MODULE_DESCRIPTION("AGP GART driver");
361MODULE_LICENSE("GPL and additional rights"); 361MODULE_LICENSE("GPL and additional rights");
362MODULE_ALIAS_MISCDEV(AGPGART_MINOR); 362MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5d31b7..0a21daed5b62 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
920module_init(agp_intel_init); 920module_init(agp_intel_init);
921module_exit(agp_intel_cleanup); 921module_exit(agp_intel_cleanup);
922 922
923MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 923MODULE_AUTHOR("Dave Jones, Various @Intel");
924MODULE_LICENSE("GPL and additional rights"); 924MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f3334829e55a..92aa43fa8d70 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
1438} 1438}
1439EXPORT_SYMBOL(intel_gmch_remove); 1439EXPORT_SYMBOL(intel_gmch_remove);
1440 1440
1441MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 1441MODULE_AUTHOR("Dave Jones, Various @Intel");
1442MODULE_LICENSE("GPL and additional rights"); 1442MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b75eb31..6c8d39cb566e 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Nvidia AGPGART routines. 2 * Nvidia AGPGART routines.
3 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up 3 * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
4 * to work in 2.5 by Dave Jones <davej@redhat.com> 4 * to work in 2.5 by Dave Jones.
5 */ 5 */
6 6
7#include <linux/module.h> 7#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20cddc05..a4961d35e940 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@ module_init(agp_via_init);
595module_exit(agp_via_cleanup); 595module_exit(agp_via_cleanup);
596 596
597MODULE_LICENSE("GPL"); 597MODULE_LICENSE("GPL");
598MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); 598MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f751378..6b65fa4e0c55 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@ struct bmc_device {
199 int guid_set; 199 int guid_set;
200 char name[16]; 200 char name[16];
201 struct kref usecount; 201 struct kref usecount;
202
203 /* bmc device attributes */
204 struct device_attribute device_id_attr;
205 struct device_attribute provides_dev_sdrs_attr;
206 struct device_attribute revision_attr;
207 struct device_attribute firmware_rev_attr;
208 struct device_attribute version_attr;
209 struct device_attribute add_dev_support_attr;
210 struct device_attribute manufacturer_id_attr;
211 struct device_attribute product_id_attr;
212 struct device_attribute guid_attr;
213 struct device_attribute aux_firmware_rev_attr;
214}; 202};
215#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev) 203#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
216 204
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
2252 2240
2253 return snprintf(buf, 10, "%u\n", bmc->id.device_id); 2241 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2254} 2242}
2255DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL); 2243static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
2256 2244
2257static ssize_t provides_device_sdrs_show(struct device *dev, 2245static ssize_t provides_device_sdrs_show(struct device *dev,
2258 struct device_attribute *attr, 2246 struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
2263 return snprintf(buf, 10, "%u\n", 2251 return snprintf(buf, 10, "%u\n",
2264 (bmc->id.device_revision & 0x80) >> 7); 2252 (bmc->id.device_revision & 0x80) >> 7);
2265} 2253}
2266DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL); 2254static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
2255 NULL);
2267 2256
2268static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2257static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2269 char *buf) 2258 char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2273 return snprintf(buf, 20, "%u\n", 2262 return snprintf(buf, 20, "%u\n",
2274 bmc->id.device_revision & 0x0F); 2263 bmc->id.device_revision & 0x0F);
2275} 2264}
2276DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL); 2265static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
2277 2266
2278static ssize_t firmware_revision_show(struct device *dev, 2267static ssize_t firmware_revision_show(struct device *dev,
2279 struct device_attribute *attr, 2268 struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
2284 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, 2273 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2285 bmc->id.firmware_revision_2); 2274 bmc->id.firmware_revision_2);
2286} 2275}
2287DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL); 2276static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
2288 2277
2289static ssize_t ipmi_version_show(struct device *dev, 2278static ssize_t ipmi_version_show(struct device *dev,
2290 struct device_attribute *attr, 2279 struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
2296 ipmi_version_major(&bmc->id), 2285 ipmi_version_major(&bmc->id),
2297 ipmi_version_minor(&bmc->id)); 2286 ipmi_version_minor(&bmc->id));
2298} 2287}
2299DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL); 2288static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
2300 2289
2301static ssize_t add_dev_support_show(struct device *dev, 2290static ssize_t add_dev_support_show(struct device *dev,
2302 struct device_attribute *attr, 2291 struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
2307 return snprintf(buf, 10, "0x%02x\n", 2296 return snprintf(buf, 10, "0x%02x\n",
2308 bmc->id.additional_device_support); 2297 bmc->id.additional_device_support);
2309} 2298}
2310DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL); 2299static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2300 NULL);
2311 2301
2312static ssize_t manufacturer_id_show(struct device *dev, 2302static ssize_t manufacturer_id_show(struct device *dev,
2313 struct device_attribute *attr, 2303 struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
2317 2307
2318 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); 2308 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2319} 2309}
2320DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL); 2310static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
2321 2311
2322static ssize_t product_id_show(struct device *dev, 2312static ssize_t product_id_show(struct device *dev,
2323 struct device_attribute *attr, 2313 struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
2327 2317
2328 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); 2318 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2329} 2319}
2330DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL); 2320static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
2331 2321
2332static ssize_t aux_firmware_rev_show(struct device *dev, 2322static ssize_t aux_firmware_rev_show(struct device *dev,
2333 struct device_attribute *attr, 2323 struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
2341 bmc->id.aux_firmware_revision[1], 2331 bmc->id.aux_firmware_revision[1],
2342 bmc->id.aux_firmware_revision[0]); 2332 bmc->id.aux_firmware_revision[0]);
2343} 2333}
2344DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL); 2334static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2345 2335
2346static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2336static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2347 char *buf) 2337 char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2352 (long long) bmc->guid[0], 2342 (long long) bmc->guid[0],
2353 (long long) bmc->guid[8]); 2343 (long long) bmc->guid[8]);
2354} 2344}
2355DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL); 2345static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
2356 2346
2357static struct attribute *bmc_dev_attrs[] = { 2347static struct attribute *bmc_dev_attrs[] = {
2358 &dev_attr_device_id.attr, 2348 &dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
2392 2382
2393 if (bmc->id.aux_firmware_revision_set) 2383 if (bmc->id.aux_firmware_revision_set)
2394 device_remove_file(&bmc->pdev.dev, 2384 device_remove_file(&bmc->pdev.dev,
2395 &bmc->aux_firmware_rev_attr); 2385 &dev_attr_aux_firmware_revision);
2396 if (bmc->guid_set) 2386 if (bmc->guid_set)
2397 device_remove_file(&bmc->pdev.dev, 2387 device_remove_file(&bmc->pdev.dev,
2398 &bmc->guid_attr); 2388 &dev_attr_guid);
2399 2389
2400 platform_device_unregister(&bmc->pdev); 2390 platform_device_unregister(&bmc->pdev);
2401} 2391}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
2422 int err; 2412 int err;
2423 2413
2424 if (bmc->id.aux_firmware_revision_set) { 2414 if (bmc->id.aux_firmware_revision_set) {
2425 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2426 err = device_create_file(&bmc->pdev.dev, 2415 err = device_create_file(&bmc->pdev.dev,
2427 &bmc->aux_firmware_rev_attr); 2416 &dev_attr_aux_firmware_revision);
2428 if (err) 2417 if (err)
2429 goto out; 2418 goto out;
2430 } 2419 }
2431 if (bmc->guid_set) { 2420 if (bmc->guid_set) {
2432 bmc->guid_attr.attr.name = "guid";
2433 err = device_create_file(&bmc->pdev.dev, 2421 err = device_create_file(&bmc->pdev.dev,
2434 &bmc->guid_attr); 2422 &dev_attr_guid);
2435 if (err) 2423 if (err)
2436 goto out_aux_firm; 2424 goto out_aux_firm;
2437 } 2425 }
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
2441out_aux_firm: 2429out_aux_firm:
2442 if (bmc->id.aux_firmware_revision_set) 2430 if (bmc->id.aux_firmware_revision_set)
2443 device_remove_file(&bmc->pdev.dev, 2431 device_remove_file(&bmc->pdev.dev,
2444 &bmc->aux_firmware_rev_attr); 2432 &dev_attr_aux_firmware_revision);
2445out: 2433out:
2446 return err; 2434 return err;
2447} 2435}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac27e73c..fd5a5e85d7dc 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
52#include <linux/dmi.h> 52#include <linux/dmi.h>
53#include <linux/kthread.h> 53#include <linux/kthread.h>
54#include <linux/acpi.h> 54#include <linux/acpi.h>
55#include <linux/ctype.h>
55 56
56#define PFX "ipmi_ssif: " 57#define PFX "ipmi_ssif: "
57#define DEVICE_NAME "ipmi_ssif" 58#define DEVICE_NAME "ipmi_ssif"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275f2f7b..7d4974b83af7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -121,13 +121,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
121 if (IS_ERR(process)) 121 if (IS_ERR(process))
122 return PTR_ERR(process); 122 return PTR_ERR(process);
123 123
124 process->is_32bit_user_mode = is_32bit_user_mode;
125
126 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", 124 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
127 process->pasid, process->is_32bit_user_mode); 125 process->pasid, process->is_32bit_user_mode);
128 126
129 kfd_init_apertures(process);
130
131 return 0; 127 return 0;
132} 128}
133 129
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da01c29..e64aa99e5e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
299 struct kfd_dev *dev; 299 struct kfd_dev *dev;
300 struct kfd_process_device *pdd; 300 struct kfd_process_device *pdd;
301 301
302 mutex_lock(&process->mutex);
303
304 /*Iterating over all devices*/ 302 /*Iterating over all devices*/
305 while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL && 303 while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
306 id < NUM_OF_SUPPORTED_GPUS) { 304 id < NUM_OF_SUPPORTED_GPUS) {
307 305
308 pdd = kfd_get_process_device_data(dev, process, 1); 306 pdd = kfd_get_process_device_data(dev, process, 1);
307 if (!pdd)
308 return -1;
309 309
310 /* 310 /*
311 * For 64 bit process aperture will be statically reserved in 311 * For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
348 id++; 348 id++;
349 } 349 }
350 350
351 mutex_unlock(&process->mutex);
352
353 return 0; 351 return 0;
354} 352}
355 353
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b830b4..3c76ef05cbcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/amd-iommu.h> 27#include <linux/amd-iommu.h>
28#include <linux/notifier.h> 28#include <linux/notifier.h>
29#include <linux/compat.h>
30
29struct mm_struct; 31struct mm_struct;
30 32
31#include "kfd_priv.h" 33#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
285 if (err != 0) 287 if (err != 0)
286 goto err_process_pqm_init; 288 goto err_process_pqm_init;
287 289
290 /* init process apertures*/
291 process->is_32bit_user_mode = is_compat_task();
292 if (kfd_init_apertures(process) != 0)
293 goto err_init_apretures;
294
288 return process; 295 return process;
289 296
297err_init_apretures:
298 pqm_uninit(&process->pqm);
290err_process_pqm_init: 299err_process_pqm_init:
291 hash_del_rcu(&process->kfd_processes); 300 hash_del_rcu(&process->kfd_processes);
292 synchronize_rcu(); 301 synchronize_rcu();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e2859e8a..b11792d7e70e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
700 dev->node_props.simd_per_cu); 700 dev->node_props.simd_per_cu);
701 sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu", 701 sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
702 dev->node_props.max_slots_scratch_cu); 702 dev->node_props.max_slots_scratch_cu);
703 sysfs_show_32bit_prop(buffer, "engine_id",
704 dev->node_props.engine_id);
705 sysfs_show_32bit_prop(buffer, "vendor_id", 703 sysfs_show_32bit_prop(buffer, "vendor_id",
706 dev->node_props.vendor_id); 704 dev->node_props.vendor_id);
707 sysfs_show_32bit_prop(buffer, "device_id", 705 sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
715 dev->gpu->kgd)); 713 dev->gpu->kgd));
716 sysfs_show_64bit_prop(buffer, "local_mem_size", 714 sysfs_show_64bit_prop(buffer, "local_mem_size",
717 kfd2kgd->get_vmem_size(dev->gpu->kgd)); 715 kfd2kgd->get_vmem_size(dev->gpu->kgd));
716
717 sysfs_show_32bit_prop(buffer, "fw_version",
718 kfd2kgd->get_fw_version(
719 dev->gpu->kgd,
720 KGD_ENGINE_MEC1));
721
718 } 722 }
719 723
720 ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", 724 ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd8dd50..47b551970a14 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
45 KGD_POOL_FRAMEBUFFER = 3, 45 KGD_POOL_FRAMEBUFFER = 3,
46}; 46};
47 47
48enum kgd_engine_type {
49 KGD_ENGINE_PFP = 1,
50 KGD_ENGINE_ME,
51 KGD_ENGINE_CE,
52 KGD_ENGINE_MEC1,
53 KGD_ENGINE_MEC2,
54 KGD_ENGINE_RLC,
55 KGD_ENGINE_SDMA,
56 KGD_ENGINE_MAX
57};
58
48struct kgd2kfd_shared_resources { 59struct kgd2kfd_shared_resources {
49 /* Bit n == 1 means VMID n is available for KFD. */ 60 /* Bit n == 1 means VMID n is available for KFD. */
50 unsigned int compute_vmid_bitmap; 61 unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
137 * 148 *
138 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot. 149 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
139 * 150 *
151 * @get_fw_version: Returns FW versions from the header
152 *
140 * This structure contains function pointers to services that the kgd driver 153 * This structure contains function pointers to services that the kgd driver
141 * provides to amdkfd driver. 154 * provides to amdkfd driver.
142 * 155 *
@@ -176,6 +189,8 @@ struct kfd2kgd_calls {
176 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, 189 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
177 unsigned int timeout, uint32_t pipe_id, 190 unsigned int timeout, uint32_t pipe_id,
178 uint32_t queue_id); 191 uint32_t queue_id);
192 uint16_t (*get_fw_version)(struct kgd_dev *kgd,
193 enum kgd_engine_type type);
179}; 194};
180 195
181bool kgd2kfd_init(unsigned interface_version, 196bool kgd2kfd_init(unsigned interface_version,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a773151c..bbdbe4721573 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
61 struct drm_crtc_state *crtc_state; 61 struct drm_crtc_state *crtc_state;
62 62
63 if (plane->state->crtc) { 63 if (plane->state->crtc) {
64 crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)]; 64 crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
65 65
66 if (WARN_ON(!crtc_state)) 66 if (WARN_ON(!crtc_state))
67 return; 67 return;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18efa5b..4d79dad9d44f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
830 * vblank events since the system was booted, including lost events due to 830 * vblank events since the system was booted, including lost events due to
831 * modesetting activity. 831 * modesetting activity.
832 * 832 *
833 * This is the legacy version of drm_crtc_vblank_count().
834 *
833 * Returns: 835 * Returns:
834 * The software vblank counter. 836 * The software vblank counter.
835 */ 837 */
@@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
844EXPORT_SYMBOL(drm_vblank_count); 846EXPORT_SYMBOL(drm_vblank_count);
845 847
846/** 848/**
849 * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
850 * @crtc: which counter to retrieve
851 *
852 * Fetches the "cooked" vblank count value that represents the number of
853 * vblank events since the system was booted, including lost events due to
854 * modesetting activity.
855 *
856 * This is the native KMS version of drm_vblank_count().
857 *
858 * Returns:
859 * The software vblank counter.
860 */
861u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
862{
863 return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
864}
865EXPORT_SYMBOL(drm_crtc_vblank_count);
866
867/**
847 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value 868 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
848 * and the system timestamp corresponding to that vblank counter value. 869 * and the system timestamp corresponding to that vblank counter value.
849 * 870 *
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
904 * 925 *
905 * Updates sequence # and timestamp on event, and sends it to userspace. 926 * Updates sequence # and timestamp on event, and sends it to userspace.
906 * Caller must hold event lock. 927 * Caller must hold event lock.
928 *
929 * This is the legacy version of drm_crtc_send_vblank_event().
907 */ 930 */
908void drm_send_vblank_event(struct drm_device *dev, int crtc, 931void drm_send_vblank_event(struct drm_device *dev, int crtc,
909 struct drm_pending_vblank_event *e) 932 struct drm_pending_vblank_event *e)
@@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
923EXPORT_SYMBOL(drm_send_vblank_event); 946EXPORT_SYMBOL(drm_send_vblank_event);
924 947
925/** 948/**
949 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
950 * @crtc: the source CRTC of the vblank event
951 * @e: the event to send
952 *
953 * Updates sequence # and timestamp on event, and sends it to userspace.
954 * Caller must hold event lock.
955 *
956 * This is the native KMS version of drm_send_vblank_event().
957 */
958void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
959 struct drm_pending_vblank_event *e)
960{
961 drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
962}
963EXPORT_SYMBOL(drm_crtc_send_vblank_event);
964
965/**
926 * drm_vblank_enable - enable the vblank interrupt on a CRTC 966 * drm_vblank_enable - enable the vblank interrupt on a CRTC
927 * @dev: DRM device 967 * @dev: DRM device
928 * @crtc: CRTC in question 968 * @crtc: CRTC in question
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1594 * 1634 *
1595 * Drivers should call this routine in their vblank interrupt handlers to 1635 * Drivers should call this routine in their vblank interrupt handlers to
1596 * update the vblank counter and send any signals that may be pending. 1636 * update the vblank counter and send any signals that may be pending.
1637 *
1638 * This is the legacy version of drm_crtc_handle_vblank().
1597 */ 1639 */
1598bool drm_handle_vblank(struct drm_device *dev, int crtc) 1640bool drm_handle_vblank(struct drm_device *dev, int crtc)
1599{ 1641{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1670 return true; 1712 return true;
1671} 1713}
1672EXPORT_SYMBOL(drm_handle_vblank); 1714EXPORT_SYMBOL(drm_handle_vblank);
1715
1716/**
1717 * drm_crtc_handle_vblank - handle a vblank event
1718 * @crtc: where this event occurred
1719 *
1720 * Drivers should call this routine in their vblank interrupt handlers to
1721 * update the vblank counter and send any signals that may be pending.
1722 *
1723 * This is the native KMS version of drm_handle_vblank().
1724 *
1725 * Returns:
1726 * True if the event was successfully handled, false on failure.
1727 */
1728bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
1729{
1730 return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
1731}
1732EXPORT_SYMBOL(drm_crtc_handle_vblank);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c3efb..574057cd1d09 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
811 if (!i915.reset) 811 if (!i915.reset)
812 return 0; 812 return 0;
813 813
814 intel_reset_gt_powersave(dev);
815
814 mutex_lock(&dev->struct_mutex); 816 mutex_lock(&dev->struct_mutex);
815 817
816 i915_gem_reset(dev); 818 i915_gem_reset(dev);
@@ -880,7 +882,7 @@ int i915_reset(struct drm_device *dev)
880 * of re-init after reset. 882 * of re-init after reset.
881 */ 883 */
882 if (INTEL_INFO(dev)->gen > 5) 884 if (INTEL_INFO(dev)->gen > 5)
883 intel_reset_gt_powersave(dev); 885 intel_enable_gt_powersave(dev);
884 } else { 886 } else {
885 mutex_unlock(&dev->struct_mutex); 887 mutex_unlock(&dev->struct_mutex);
886 } 888 }
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
1584 .gem_prime_import = i915_gem_prime_import, 1586 .gem_prime_import = i915_gem_prime_import,
1585 1587
1586 .dumb_create = i915_gem_dumb_create, 1588 .dumb_create = i915_gem_dumb_create,
1587 .dumb_map_offset = i915_gem_dumb_map_offset, 1589 .dumb_map_offset = i915_gem_mmap_gtt,
1588 .dumb_destroy = drm_gem_dumb_destroy, 1590 .dumb_destroy = drm_gem_dumb_destroy,
1589 .ioctls = i915_ioctls, 1591 .ioctls = i915_ioctls,
1590 .fops = &i915_driver_fops, 1592 .fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5541ec..70d0f0f06f1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2501,9 +2501,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
2501int i915_gem_dumb_create(struct drm_file *file_priv, 2501int i915_gem_dumb_create(struct drm_file *file_priv,
2502 struct drm_device *dev, 2502 struct drm_device *dev,
2503 struct drm_mode_create_dumb *args); 2503 struct drm_mode_create_dumb *args);
2504int i915_gem_dumb_map_offset(struct drm_file *file_priv, 2504int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
2505 struct drm_device *dev, uint32_t handle, 2505 uint32_t handle, uint64_t *offset);
2506 uint64_t *offset);
2507/** 2506/**
2508 * Returns true if seq1 is later than seq2. 2507 * Returns true if seq1 is later than seq2.
2509 */ 2508 */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea626db..52adcb680be3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@ static int
401i915_gem_create(struct drm_file *file, 401i915_gem_create(struct drm_file *file,
402 struct drm_device *dev, 402 struct drm_device *dev,
403 uint64_t size, 403 uint64_t size,
404 bool dumb,
405 uint32_t *handle_p) 404 uint32_t *handle_p)
406{ 405{
407 struct drm_i915_gem_object *obj; 406 struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
417 if (obj == NULL) 416 if (obj == NULL)
418 return -ENOMEM; 417 return -ENOMEM;
419 418
420 obj->base.dumb = dumb;
421 ret = drm_gem_handle_create(file, &obj->base, &handle); 419 ret = drm_gem_handle_create(file, &obj->base, &handle);
422 /* drop reference from allocate - handle holds it now */ 420 /* drop reference from allocate - handle holds it now */
423 drm_gem_object_unreference_unlocked(&obj->base); 421 drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
437 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 435 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
438 args->size = args->pitch * args->height; 436 args->size = args->pitch * args->height;
439 return i915_gem_create(file, dev, 437 return i915_gem_create(file, dev,
440 args->size, true, &args->handle); 438 args->size, &args->handle);
441} 439}
442 440
443/** 441/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
450 struct drm_i915_gem_create *args = data; 448 struct drm_i915_gem_create *args = data;
451 449
452 return i915_gem_create(file, dev, 450 return i915_gem_create(file, dev,
453 args->size, false, &args->handle); 451 args->size, &args->handle);
454} 452}
455 453
456static inline int 454static inline int
@@ -1840,10 +1838,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1840 drm_gem_free_mmap_offset(&obj->base); 1838 drm_gem_free_mmap_offset(&obj->base);
1841} 1839}
1842 1840
1843static int 1841int
1844i915_gem_mmap_gtt(struct drm_file *file, 1842i915_gem_mmap_gtt(struct drm_file *file,
1845 struct drm_device *dev, 1843 struct drm_device *dev,
1846 uint32_t handle, bool dumb, 1844 uint32_t handle,
1847 uint64_t *offset) 1845 uint64_t *offset)
1848{ 1846{
1849 struct drm_i915_private *dev_priv = dev->dev_private; 1847 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1858,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
1860 goto unlock; 1858 goto unlock;
1861 } 1859 }
1862 1860
1863 /*
1864 * We don't allow dumb mmaps on objects created using another
1865 * interface.
1866 */
1867 WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
1868 "Illegal dumb map of accelerated buffer.\n");
1869
1870 if (obj->base.size > dev_priv->gtt.mappable_end) { 1861 if (obj->base.size > dev_priv->gtt.mappable_end) {
1871 ret = -E2BIG; 1862 ret = -E2BIG;
1872 goto out; 1863 goto out;
@@ -1891,15 +1882,6 @@ unlock:
1891 return ret; 1882 return ret;
1892} 1883}
1893 1884
1894int
1895i915_gem_dumb_map_offset(struct drm_file *file,
1896 struct drm_device *dev,
1897 uint32_t handle,
1898 uint64_t *offset)
1899{
1900 return i915_gem_mmap_gtt(file, dev, handle, true, offset);
1901}
1902
1903/** 1885/**
1904 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1886 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1905 * @dev: DRM device 1887 * @dev: DRM device
@@ -1921,7 +1903,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1921{ 1903{
1922 struct drm_i915_gem_mmap_gtt *args = data; 1904 struct drm_i915_gem_mmap_gtt *args = data;
1923 1905
1924 return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset); 1906 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1925} 1907}
1926 1908
1927static inline int 1909static inline int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..d011ec82ef1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
473 u32 hw_flags) 473 u32 hw_flags)
474{ 474{
475 u32 flags = hw_flags | MI_MM_SPACE_GTT; 475 u32 flags = hw_flags | MI_MM_SPACE_GTT;
476 int ret; 476 const int num_rings =
477 /* Use an extended w/a on ivb+ if signalling from other rings */
478 i915_semaphore_is_enabled(ring->dev) ?
479 hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
480 0;
481 int len, i, ret;
477 482
478 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB 483 /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
479 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value 484 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
490 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8) 495 if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
491 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); 496 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
492 497
493 ret = intel_ring_begin(ring, 6); 498
499 len = 4;
500 if (INTEL_INFO(ring->dev)->gen >= 7)
501 len += 2 + (num_rings ? 4*num_rings + 2 : 0);
502
503 ret = intel_ring_begin(ring, len);
494 if (ret) 504 if (ret)
495 return ret; 505 return ret;
496 506
497 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 507 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
498 if (INTEL_INFO(ring->dev)->gen >= 7) 508 if (INTEL_INFO(ring->dev)->gen >= 7) {
499 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); 509 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
500 else 510 if (num_rings) {
501 intel_ring_emit(ring, MI_NOOP); 511 struct intel_engine_cs *signaller;
512
513 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
514 for_each_ring(signaller, to_i915(ring->dev), i) {
515 if (signaller == ring)
516 continue;
517
518 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
519 intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
520 }
521 }
522 }
502 523
503 intel_ring_emit(ring, MI_NOOP); 524 intel_ring_emit(ring, MI_NOOP);
504 intel_ring_emit(ring, MI_SET_CONTEXT); 525 intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
510 */ 531 */
511 intel_ring_emit(ring, MI_NOOP); 532 intel_ring_emit(ring, MI_NOOP);
512 533
513 if (INTEL_INFO(ring->dev)->gen >= 7) 534 if (INTEL_INFO(ring->dev)->gen >= 7) {
535 if (num_rings) {
536 struct intel_engine_cs *signaller;
537
538 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
539 for_each_ring(signaller, to_i915(ring->dev), i) {
540 if (signaller == ring)
541 continue;
542
543 intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
544 intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
545 }
546 }
514 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); 547 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
515 else 548 }
516 intel_ring_emit(ring, MI_NOOP);
517 549
518 intel_ring_advance(ring); 550 intel_ring_advance(ring);
519 551
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027ba3ee5..11738316394a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
121 goto err; 121 goto err;
122 } 122 }
123 123
124 WARN_ONCE(obj->base.dumb,
125 "GPU use of dumb buffer is illegal.\n");
126
127 drm_gem_object_reference(&obj->base); 124 drm_gem_object_reference(&obj->base);
128 list_add_tail(&obj->obj_exec_link, &objects); 125 list_add_tail(&obj->obj_exec_link, &objects);
129 } 126 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b0f9b6..996c2931c499 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
281 struct drm_i915_private *dev_priv = dev->dev_private; 281 struct drm_i915_private *dev_priv = dev->dev_private;
282 282
283 spin_lock_irq(&dev_priv->irq_lock); 283 spin_lock_irq(&dev_priv->irq_lock);
284
284 WARN_ON(dev_priv->rps.pm_iir); 285 WARN_ON(dev_priv->rps.pm_iir);
285 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 286 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
286 dev_priv->rps.interrupts_enabled = true; 287 dev_priv->rps.interrupts_enabled = true;
288 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
289 dev_priv->pm_rps_events);
287 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 290 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
291
288 spin_unlock_irq(&dev_priv->irq_lock); 292 spin_unlock_irq(&dev_priv->irq_lock);
289} 293}
290 294
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3307 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3311 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3308 3312
3309 if (INTEL_INFO(dev)->gen >= 6) { 3313 if (INTEL_INFO(dev)->gen >= 6) {
3310 pm_irqs |= dev_priv->pm_rps_events; 3314 /*
3311 3315 * RPS interrupts will get enabled/disabled on demand when RPS
3316 * itself is enabled/disabled.
3317 */
3312 if (HAS_VEBOX(dev)) 3318 if (HAS_VEBOX(dev))
3313 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3319 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3314 3320
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3520 dev_priv->pm_irq_mask = 0xffffffff; 3526 dev_priv->pm_irq_mask = 0xffffffff;
3521 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3527 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3522 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3528 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3523 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events); 3529 /*
3530 * RPS interrupts will get enabled/disabled on demand when RPS itself
3531 * is enabled/disabled.
3532 */
3533 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3524 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3534 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3525} 3535}
3526 3536
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3609 3619
3610 vlv_display_irq_reset(dev_priv); 3620 vlv_display_irq_reset(dev_priv);
3611 3621
3612 dev_priv->irq_mask = 0; 3622 dev_priv->irq_mask = ~0;
3613} 3623}
3614 3624
3615static void valleyview_irq_uninstall(struct drm_device *dev) 3625static void valleyview_irq_uninstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc238f70b..172de3b3433b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
395#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21) 395#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
396#define PIPE_CONTROL_CS_STALL (1<<20) 396#define PIPE_CONTROL_CS_STALL (1<<20)
397#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 397#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
398#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
398#define PIPE_CONTROL_QW_WRITE (1<<14) 399#define PIPE_CONTROL_QW_WRITE (1<<14)
399#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) 400#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
400#define PIPE_CONTROL_DEPTH_STALL (1<<13) 401#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
1128#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) 1129#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
1129#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) 1130#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
1130#define GEN6_NOSYNC 0 1131#define GEN6_NOSYNC 0
1132#define RING_PSMI_CTL(base) ((base)+0x50)
1131#define RING_MAX_IDLE(base) ((base)+0x54) 1133#define RING_MAX_IDLE(base) ((base)+0x54)
1132#define RING_HWS_PGA(base) ((base)+0x80) 1134#define RING_HWS_PGA(base) ((base)+0x80)
1133#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 1135#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
1458#define GEN6_BLITTER_FBC_NOTIFY (1<<3) 1460#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
1459 1461
1460#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050 1462#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
1463#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
1461#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) 1464#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
1462#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10) 1465#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
1463 1466
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e273c8..964b28e3c630 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
6191 valleyview_cleanup_gt_powersave(dev); 6191 valleyview_cleanup_gt_powersave(dev);
6192} 6192}
6193 6193
6194static void gen6_suspend_rps(struct drm_device *dev)
6195{
6196 struct drm_i915_private *dev_priv = dev->dev_private;
6197
6198 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6199
6200 /*
6201 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6202 * is added for it.
6203 */
6204 if (INTEL_INFO(dev)->gen < 9)
6205 gen6_disable_rps_interrupts(dev);
6206}
6207
6194/** 6208/**
6195 * intel_suspend_gt_powersave - suspend PM work and helper threads 6209 * intel_suspend_gt_powersave - suspend PM work and helper threads
6196 * @dev: drm device 6210 * @dev: drm device
@@ -6206,14 +6220,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
6206 if (INTEL_INFO(dev)->gen < 6) 6220 if (INTEL_INFO(dev)->gen < 6)
6207 return; 6221 return;
6208 6222
6209 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6223 gen6_suspend_rps(dev);
6210
6211 /*
6212 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6213 * is added for it.
6214 */
6215 if (INTEL_INFO(dev)->gen < 9)
6216 gen6_disable_rps_interrupts(dev);
6217 6224
6218 /* Force GPU to min freq during suspend */ 6225 /* Force GPU to min freq during suspend */
6219 gen6_rps_idle(dev_priv); 6226 gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
6316{ 6323{
6317 struct drm_i915_private *dev_priv = dev->dev_private; 6324 struct drm_i915_private *dev_priv = dev->dev_private;
6318 6325
6326 if (INTEL_INFO(dev)->gen < 6)
6327 return;
6328
6329 gen6_suspend_rps(dev);
6319 dev_priv->rps.enabled = false; 6330 dev_priv->rps.enabled = false;
6320 intel_enable_gt_powersave(dev);
6321} 6331}
6322 6332
6323static void ibx_init_clock_gating(struct drm_device *dev) 6333static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9a75d1..c7bc93d28d84 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 362 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 363 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 364 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
365 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
365 /* 366 /*
366 * TLB invalidate requires a post-sync write. 367 * TLB invalidate requires a post-sync write.
367 */ 368 */
368 flags |= PIPE_CONTROL_QW_WRITE; 369 flags |= PIPE_CONTROL_QW_WRITE;
369 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 370 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
370 371
372 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
373
371 /* Workaround: we must issue a pipe_control with CS-stall bit 374 /* Workaround: we must issue a pipe_control with CS-stall bit
372 * set before a pipe_control command that has the state cache 375 * set before a pipe_control command that has the state cache
373 * invalidate bit set. */ 376 * invalidate bit set. */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa873048308b..94a5bee69fe7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); 386 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
387 drm_gem_object_unreference(gpu->memptrs_bo); 387 drm_gem_object_unreference(gpu->memptrs_bo);
388 } 388 }
389 if (gpu->pm4) 389 release_firmware(gpu->pm4);
390 release_firmware(gpu->pm4); 390 release_firmware(gpu->pfp);
391 if (gpu->pfp)
392 release_firmware(gpu->pfp);
393 msm_gpu_cleanup(&gpu->base); 391 msm_gpu_cleanup(&gpu->base);
394} 392}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb0405d76..b4e70e0e3cfa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
141 uint32_t hpd_ctrl; 141 uint32_t hpd_ctrl;
142 int i, ret; 142 int i, ret;
143 143
144 for (i = 0; i < config->hpd_reg_cnt; i++) {
145 ret = regulator_enable(hdmi->hpd_regs[i]);
146 if (ret) {
147 dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
148 config->hpd_reg_names[i], ret);
149 goto fail;
150 }
151 }
152
144 ret = gpio_config(hdmi, true); 153 ret = gpio_config(hdmi, true);
145 if (ret) { 154 if (ret) {
146 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); 155 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
164 } 173 }
165 } 174 }
166 175
167 for (i = 0; i < config->hpd_reg_cnt; i++) {
168 ret = regulator_enable(hdmi->hpd_regs[i]);
169 if (ret) {
170 dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
171 config->hpd_reg_names[i], ret);
172 goto fail;
173 }
174 }
175
176 hdmi_set_mode(hdmi, false); 176 hdmi_set_mode(hdmi, false);
177 phy->funcs->reset(phy); 177 phy->funcs->reset(phy);
178 hdmi_set_mode(hdmi, true); 178 hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
200 return ret; 200 return ret;
201} 201}
202 202
203static int hdp_disable(struct hdmi_connector *hdmi_connector) 203static void hdp_disable(struct hdmi_connector *hdmi_connector)
204{ 204{
205 struct hdmi *hdmi = hdmi_connector->hdmi; 205 struct hdmi *hdmi = hdmi_connector->hdmi;
206 const struct hdmi_platform_config *config = hdmi->config; 206 const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
212 212
213 hdmi_set_mode(hdmi, false); 213 hdmi_set_mode(hdmi, false);
214 214
215 for (i = 0; i < config->hpd_reg_cnt; i++) {
216 ret = regulator_disable(hdmi->hpd_regs[i]);
217 if (ret) {
218 dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
219 config->hpd_reg_names[i], ret);
220 goto fail;
221 }
222 }
223
224 for (i = 0; i < config->hpd_clk_cnt; i++) 215 for (i = 0; i < config->hpd_clk_cnt; i++)
225 clk_disable_unprepare(hdmi->hpd_clks[i]); 216 clk_disable_unprepare(hdmi->hpd_clks[i]);
226 217
227 ret = gpio_config(hdmi, false); 218 ret = gpio_config(hdmi, false);
228 if (ret) { 219 if (ret)
229 dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); 220 dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
230 goto fail;
231 }
232
233 return 0;
234 221
235fail: 222 for (i = 0; i < config->hpd_reg_cnt; i++) {
236 return ret; 223 ret = regulator_disable(hdmi->hpd_regs[i]);
224 if (ret)
225 dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
226 config->hpd_reg_names[i], ret);
227 }
237} 228}
238 229
239static void 230static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
260 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) { 251 (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
261 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED); 252 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
262 253
263 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); 254 /* ack & disable (temporarily) HPD events: */
264
265 /* ack the irq: */
266 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 255 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
267 hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK); 256 HDMI_HPD_INT_CTRL_INT_ACK);
257
258 DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
268 259
269 /* detect disconnect if we are connected or visa versa: */ 260 /* detect disconnect if we are connected or visa versa: */
270 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN; 261 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e100d8b..3449213f1e76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
331 struct drm_crtc_state *state) 331 struct drm_crtc_state *state)
332{ 332{
333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
334 struct drm_device *dev = crtc->dev;
335
336 DBG("%s: check", mdp4_crtc->name); 334 DBG("%s: check", mdp4_crtc->name);
337
338 if (mdp4_crtc->event) {
339 dev_err(dev->dev, "already pending flip!\n");
340 return -EBUSY;
341 }
342
343 // TODO anything else to check? 335 // TODO anything else to check?
344
345 return 0; 336 return 0;
346} 337}
347 338
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
357 struct drm_device *dev = crtc->dev; 348 struct drm_device *dev = crtc->dev;
358 unsigned long flags; 349 unsigned long flags;
359 350
360 DBG("%s: flush", mdp4_crtc->name); 351 DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
361 352
362 WARN_ON(mdp4_crtc->event); 353 WARN_ON(mdp4_crtc->event);
363 354
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3a82d7..f021f960a8a2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
303 303
304 DBG("%s: check", mdp5_crtc->name); 304 DBG("%s: check", mdp5_crtc->name);
305 305
306 if (mdp5_crtc->event) {
307 dev_err(dev->dev, "already pending flip!\n");
308 return -EBUSY;
309 }
310
311 /* request a free CTL, if none is already allocated for this CRTC */ 306 /* request a free CTL, if none is already allocated for this CRTC */
312 if (state->enable && !mdp5_crtc->ctl) { 307 if (state->enable && !mdp5_crtc->ctl) {
313 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc); 308 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
364 struct drm_device *dev = crtc->dev; 359 struct drm_device *dev = crtc->dev;
365 unsigned long flags; 360 unsigned long flags;
366 361
367 DBG("%s: flush", mdp5_crtc->name); 362 DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
368 363
369 WARN_ON(mdp5_crtc->event); 364 WARN_ON(mdp5_crtc->event);
370 365
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
460 /* now that we know what irq's we want: */ 455 /* now that we know what irq's we want: */
461 mdp5_crtc->err.irqmask = intf2err(intf); 456 mdp5_crtc->err.irqmask = intf2err(intf);
462 mdp5_crtc->vblank.irqmask = intf2vblank(intf); 457 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
463 458 mdp_irq_update(&mdp5_kms->base);
464 /* when called from modeset_init(), skip the rest until later: */
465 if (!mdp5_kms)
466 return;
467 459
468 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 460 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
469 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); 461 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b80c488..9f01a4f21af2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
216 goto fail; 216 goto fail;
217 } 217 }
218 218
219 /* NOTE: the vsync and error irq's are actually associated with 219 encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
220 * the INTF/encoder.. the easiest way to deal with this (ie. what
221 * we do now) is assume a fixed relationship between crtc's and
222 * encoders. I'm not sure if there is ever a need to more freely
223 * assign crtcs to encoders, but if there is then we need to take
224 * care of error and vblank irq's that the crtc has registered,
225 * and also update user-requested vblank_mask.
226 */
227 encoder->possible_crtcs = BIT(0);
228 mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
229
230 priv->encoders[priv->num_encoders++] = encoder; 220 priv->encoders[priv->num_encoders++] = encoder;
231 221
232 /* Construct bridge/connector for HDMI: */ 222 /* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b64a245..2a731722d840 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask); 42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
43} 43}
44 44
45static void update_irq_unlocked(struct mdp_kms *mdp_kms) 45/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
46 * link changes, this must be called to figure out the new global irqmask
47 */
48void mdp_irq_update(struct mdp_kms *mdp_kms)
46{ 49{
47 unsigned long flags; 50 unsigned long flags;
48 spin_lock_irqsave(&list_lock, flags); 51 spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
122 spin_unlock_irqrestore(&list_lock, flags); 125 spin_unlock_irqrestore(&list_lock, flags);
123 126
124 if (needs_update) 127 if (needs_update)
125 update_irq_unlocked(mdp_kms); 128 mdp_irq_update(mdp_kms);
126} 129}
127 130
128void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq) 131void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
141 spin_unlock_irqrestore(&list_lock, flags); 144 spin_unlock_irqrestore(&list_lock, flags);
142 145
143 if (needs_update) 146 if (needs_update)
144 update_irq_unlocked(mdp_kms); 147 mdp_irq_update(mdp_kms);
145} 148}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5ad4fd..b268ce95d394 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
75void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask); 75void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
76void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq); 76void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
77void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq); 77void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
78 78void mdp_irq_update(struct mdp_kms *mdp_kms);
79 79
80/* 80/*
81 * pixel format helpers: 81 * pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412e13dc..191968256c58 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@ struct msm_commit {
23 struct drm_atomic_state *state; 23 struct drm_atomic_state *state;
24 uint32_t fence; 24 uint32_t fence;
25 struct msm_fence_cb fence_cb; 25 struct msm_fence_cb fence_cb;
26 uint32_t crtc_mask;
26}; 27};
27 28
28static void fence_cb(struct msm_fence_cb *cb); 29static void fence_cb(struct msm_fence_cb *cb);
29 30
31/* block until specified crtcs are no longer pending update, and
32 * atomically mark them as pending update
33 */
34static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
35{
36 int ret;
37
38 spin_lock(&priv->pending_crtcs_event.lock);
39 ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
40 !(priv->pending_crtcs & crtc_mask));
41 if (ret == 0) {
42 DBG("start: %08x", crtc_mask);
43 priv->pending_crtcs |= crtc_mask;
44 }
45 spin_unlock(&priv->pending_crtcs_event.lock);
46
47 return ret;
48}
49
50/* clear specified crtcs (no longer pending update)
51 */
52static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
53{
54 spin_lock(&priv->pending_crtcs_event.lock);
55 DBG("end: %08x", crtc_mask);
56 priv->pending_crtcs &= ~crtc_mask;
57 wake_up_all_locked(&priv->pending_crtcs_event);
58 spin_unlock(&priv->pending_crtcs_event.lock);
59}
60
30static struct msm_commit *new_commit(struct drm_atomic_state *state) 61static struct msm_commit *new_commit(struct drm_atomic_state *state)
31{ 62{
32 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL); 63 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
58 89
59 drm_atomic_helper_commit_post_planes(dev, state); 90 drm_atomic_helper_commit_post_planes(dev, state);
60 91
92 /* NOTE: _wait_for_vblanks() only waits for vblank on
93 * enabled CRTCs. So we end up faulting when disabling
94 * due to (potentially) unref'ing the outgoing fb's
95 * before the vblank when the disable has latched.
96 *
97 * But if it did wait on disabled (or newly disabled)
98 * CRTCs, that would be racy (ie. we could have missed
99 * the irq. We need some way to poll for pipe shut
100 * down. Or just live with occasionally hitting the
101 * timeout in the CRTC disable path (which really should
102 * not be critical path)
103 */
104
61 drm_atomic_helper_wait_for_vblanks(dev, state); 105 drm_atomic_helper_wait_for_vblanks(dev, state);
62 106
63 drm_atomic_helper_cleanup_planes(dev, state); 107 drm_atomic_helper_cleanup_planes(dev, state);
64 108
65 drm_atomic_state_free(state); 109 drm_atomic_state_free(state);
66 110
111 end_atomic(dev->dev_private, c->crtc_mask);
112
67 kfree(c); 113 kfree(c);
68} 114}
69 115
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
97int msm_atomic_commit(struct drm_device *dev, 143int msm_atomic_commit(struct drm_device *dev,
98 struct drm_atomic_state *state, bool async) 144 struct drm_atomic_state *state, bool async)
99{ 145{
100 struct msm_commit *c;
101 int nplanes = dev->mode_config.num_total_plane; 146 int nplanes = dev->mode_config.num_total_plane;
147 int ncrtcs = dev->mode_config.num_crtc;
148 struct msm_commit *c;
102 int i, ret; 149 int i, ret;
103 150
104 ret = drm_atomic_helper_prepare_planes(dev, state); 151 ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
106 return ret; 153 return ret;
107 154
108 c = new_commit(state); 155 c = new_commit(state);
156 if (!c)
157 return -ENOMEM;
158
159 /*
160 * Figure out what crtcs we have:
161 */
162 for (i = 0; i < ncrtcs; i++) {
163 struct drm_crtc *crtc = state->crtcs[i];
164 if (!crtc)
165 continue;
166 c->crtc_mask |= (1 << drm_crtc_index(crtc));
167 }
109 168
110 /* 169 /*
111 * Figure out what fence to wait for: 170 * Figure out what fence to wait for:
@@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev,
122 } 181 }
123 182
124 /* 183 /*
184 * Wait for pending updates on any of the same crtc's and then
185 * mark our set of crtc's as busy:
186 */
187 ret = start_atomic(dev->dev_private, c->crtc_mask);
188 if (ret)
189 return ret;
190
191 /*
125 * This is the point of no return - everything below never fails except 192 * This is the point of no return - everything below never fails except
126 * when the hw goes bonghits. Which means we can commit the new state on 193 * when the hw goes bonghits. Which means we can commit the new state on
127 * the software side now. 194 * the software side now.
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217e1bfc..9a61546a0b05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
193 193
194 priv->wq = alloc_ordered_workqueue("msm", 0); 194 priv->wq = alloc_ordered_workqueue("msm", 0);
195 init_waitqueue_head(&priv->fence_event); 195 init_waitqueue_head(&priv->fence_event);
196 init_waitqueue_head(&priv->pending_crtcs_event);
196 197
197 INIT_LIST_HEAD(&priv->inactive_list); 198 INIT_LIST_HEAD(&priv->inactive_list);
198 INIT_LIST_HEAD(&priv->fence_cbs); 199 INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 136303818436..b69ef2d5a26c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@ struct msm_drm_private {
96 /* callbacks deferred until bo is inactive: */ 96 /* callbacks deferred until bo is inactive: */
97 struct list_head fence_cbs; 97 struct list_head fence_cbs;
98 98
99 /* crtcs pending async atomic updates: */
100 uint32_t pending_crtcs;
101 wait_queue_head_t pending_crtcs_event;
102
99 /* registered MMUs: */ 103 /* registered MMUs: */
100 unsigned int num_mmus; 104 unsigned int num_mmus;
101 struct msm_mmu *mmus[NUM_DOMAINS]; 105 struct msm_mmu *mmus[NUM_DOMAINS];
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e526b4e..1f3af13ccede 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@ fail_unlock:
190fail: 190fail:
191 191
192 if (ret) { 192 if (ret) {
193 if (fbi) 193 framebuffer_release(fbi);
194 framebuffer_release(fbi);
195 if (fb) { 194 if (fb) {
196 drm_framebuffer_unregister_private(fb); 195 drm_framebuffer_unregister_private(fb);
197 drm_framebuffer_remove(fb); 196 drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e49d5b5..49dea4fb55ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
535 drm_free_large(msm_obj->pages); 535 drm_free_large(msm_obj->pages);
536 536
537 } else { 537 } else {
538 if (msm_obj->vaddr) 538 vunmap(msm_obj->vaddr);
539 vunmap(msm_obj->vaddr);
540 put_pages(obj); 539 put_pages(obj);
541 } 540 }
542 541
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902a91ab..f8042433752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
876 if (ret) 876 if (ret)
877 return ret; 877 return ret;
878 878
879 bo->gem.dumb = true;
880 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); 879 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
881 drm_gem_object_unreference_unlocked(&bo->gem); 880 drm_gem_object_unreference_unlocked(&bo->gem);
882 return ret; 881 return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
892 gem = drm_gem_object_lookup(dev, file_priv, handle); 891 gem = drm_gem_object_lookup(dev, file_priv, handle);
893 if (gem) { 892 if (gem) {
894 struct nouveau_bo *bo = nouveau_gem_object(gem); 893 struct nouveau_bo *bo = nouveau_gem_object(gem);
895
896 /*
897 * We don't allow dumb mmaps on objects created using another
898 * interface.
899 */
900 WARN_ONCE(!(gem->dumb || gem->import_attach),
901 "Illegal dumb map of accelerated buffer.\n");
902
903 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 894 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
904 drm_gem_object_unreference_unlocked(gem); 895 drm_gem_object_unreference_unlocked(gem);
905 return 0; 896 return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a22a4bf..42c34babc2e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,9 +444,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
444 list_for_each_entry(nvbo, list, entry) { 444 list_for_each_entry(nvbo, list, entry) {
445 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 445 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
446 446
447 WARN_ONCE(nvbo->gem.dumb,
448 "GPU use of dumb buffer is illegal.\n");
449
450 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, 447 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
451 b->write_domains, 448 b->write_domains,
452 b->valid_domains); 449 b->valid_domains);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6def61e7..3d1cfcb96b6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
28#include "nouveau_ttm.h" 28#include "nouveau_ttm.h"
29#include "nouveau_gem.h" 29#include "nouveau_gem.h"
30 30
31#include "drm_legacy.h"
31static int 32static int
32nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) 33nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
33{ 34{
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
281 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); 282 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
282 283
283 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 284 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
284 return -EINVAL; 285 return drm_legacy_mmap(filp, vma);
285 286
286 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); 287 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
287} 288}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f229043e..a46f73737994 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
394 return r; 394 return r;
395} 395}
396 396
397static int radeon_mode_mmap(struct drm_file *filp, 397int radeon_mode_dumb_mmap(struct drm_file *filp,
398 struct drm_device *dev, 398 struct drm_device *dev,
399 uint32_t handle, bool dumb, 399 uint32_t handle, uint64_t *offset_p)
400 uint64_t *offset_p)
401{ 400{
402 struct drm_gem_object *gobj; 401 struct drm_gem_object *gobj;
403 struct radeon_bo *robj; 402 struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
406 if (gobj == NULL) { 405 if (gobj == NULL) {
407 return -ENOENT; 406 return -ENOENT;
408 } 407 }
409
410 /*
411 * We don't allow dumb mmaps on objects created using another
412 * interface.
413 */
414 WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
415 "Illegal dumb map of GPU buffer.\n");
416
417 robj = gem_to_radeon_bo(gobj); 408 robj = gem_to_radeon_bo(gobj);
418 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 409 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
419 drm_gem_object_unreference_unlocked(gobj); 410 drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
424 return 0; 415 return 0;
425} 416}
426 417
427int radeon_mode_dumb_mmap(struct drm_file *filp,
428 struct drm_device *dev,
429 uint32_t handle, uint64_t *offset_p)
430{
431 return radeon_mode_mmap(filp, dev, handle, true, offset_p);
432}
433
434int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 418int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
435 struct drm_file *filp) 419 struct drm_file *filp)
436{ 420{
437 struct drm_radeon_gem_mmap *args = data; 421 struct drm_radeon_gem_mmap *args = data;
438 422
439 return radeon_mode_mmap(filp, dev, args->handle, false, 423 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
440 &args->addr_ptr);
441} 424}
442 425
443int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 426int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
763 return -ENOMEM; 746 return -ENOMEM;
764 747
765 r = drm_gem_handle_create(file_priv, gobj, &handle); 748 r = drm_gem_handle_create(file_priv, gobj, &handle);
766 gobj->dumb = true;
767 /* drop reference from allocate - handle holds it now */ 749 /* drop reference from allocate - handle holds it now */
768 drm_gem_object_unreference_unlocked(gobj); 750 drm_gem_object_unreference_unlocked(gobj);
769 if (r) { 751 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d02068ec3..242fd8b1b221 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
28#include "cikd.h" 28#include "cikd.h"
29#include "cik_reg.h" 29#include "cik_reg.h"
30#include "radeon_kfd.h" 30#include "radeon_kfd.h"
31#include "radeon_ucode.h"
32#include <linux/firmware.h>
31 33
32#define CIK_PIPE_PER_MEC (4) 34#define CIK_PIPE_PER_MEC (4)
33 35
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
49static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd); 51static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
50 52
51static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd); 53static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
54static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
52 55
53/* 56/*
54 * Register access functions 57 * Register access functions
@@ -91,6 +94,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
91 .hqd_load = kgd_hqd_load, 94 .hqd_load = kgd_hqd_load,
92 .hqd_is_occupies = kgd_hqd_is_occupies, 95 .hqd_is_occupies = kgd_hqd_is_occupies,
93 .hqd_destroy = kgd_hqd_destroy, 96 .hqd_destroy = kgd_hqd_destroy,
97 .get_fw_version = get_fw_version
94}; 98};
95 99
96static const struct kgd2kfd_calls *kgd2kfd; 100static const struct kgd2kfd_calls *kgd2kfd;
@@ -561,3 +565,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
561 release_queue(kgd); 565 release_queue(kgd);
562 return 0; 566 return 0;
563} 567}
568
569static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
570{
571 struct radeon_device *rdev = (struct radeon_device *) kgd;
572 const union radeon_firmware_header *hdr;
573
574 BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
575
576 switch (type) {
577 case KGD_ENGINE_PFP:
578 hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
579 break;
580
581 case KGD_ENGINE_ME:
582 hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
583 break;
584
585 case KGD_ENGINE_CE:
586 hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
587 break;
588
589 case KGD_ENGINE_MEC1:
590 hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
591 break;
592
593 case KGD_ENGINE_MEC2:
594 hdr = (const union radeon_firmware_header *)
595 rdev->mec2_fw->data;
596 break;
597
598 case KGD_ENGINE_RLC:
599 hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
600 break;
601
602 case KGD_ENGINE_SDMA:
603 hdr = (const union radeon_firmware_header *)
604 rdev->sdma_fw->data;
605 break;
606
607 default:
608 return 0;
609 }
610
611 if (hdr == NULL)
612 return 0;
613
614 /* Only 12 bit in use*/
615 return hdr->common.ucode_version;
616}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223eb469..86fc56434b28 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
529 u32 current_domain = 529 u32 current_domain =
530 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 530 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
531 531
532 WARN_ONCE(bo->gem_base.dumb,
533 "GPU use of dumb buffer is illegal.\n");
534
535 /* Check if this buffer will be moved and don't move it 532 /* Check if this buffer will be moved and don't move it
536 * if we have moved too many buffers for this IB already. 533 * if we have moved too many buffers for this IB already.
537 * 534 *
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960286a6..978993fa3a36 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
168 const struct tegra_dc_window *window) 168 const struct tegra_dc_window *window)
169{ 169{
170 unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp; 170 unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
171 unsigned long value; 171 unsigned long value, flags;
172 bool yuv, planar; 172 bool yuv, planar;
173 173
174 /* 174 /*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
181 else 181 else
182 bpp = planar ? 1 : 2; 182 bpp = planar ? 1 : 2;
183 183
184 spin_lock_irqsave(&dc->lock, flags);
185
184 value = WINDOW_A_SELECT << index; 186 value = WINDOW_A_SELECT << index;
185 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); 187 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
186 188
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
273 275
274 case TEGRA_BO_TILING_MODE_BLOCK: 276 case TEGRA_BO_TILING_MODE_BLOCK:
275 DRM_ERROR("hardware doesn't support block linear mode\n"); 277 DRM_ERROR("hardware doesn't support block linear mode\n");
278 spin_unlock_irqrestore(&dc->lock, flags);
276 return -EINVAL; 279 return -EINVAL;
277 } 280 }
278 281
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
331 334
332 tegra_dc_window_commit(dc, index); 335 tegra_dc_window_commit(dc, index);
333 336
337 spin_unlock_irqrestore(&dc->lock, flags);
338
334 return 0; 339 return 0;
335} 340}
336 341
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
338{ 343{
339 struct tegra_dc *dc = to_tegra_dc(plane->crtc); 344 struct tegra_dc *dc = to_tegra_dc(plane->crtc);
340 struct tegra_plane *p = to_tegra_plane(plane); 345 struct tegra_plane *p = to_tegra_plane(plane);
346 unsigned long flags;
341 u32 value; 347 u32 value;
342 348
343 if (!plane->crtc) 349 if (!plane->crtc)
344 return 0; 350 return 0;
345 351
352 spin_lock_irqsave(&dc->lock, flags);
353
346 value = WINDOW_A_SELECT << p->index; 354 value = WINDOW_A_SELECT << p->index;
347 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER); 355 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
348 356
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
352 360
353 tegra_dc_window_commit(dc, p->index); 361 tegra_dc_window_commit(dc, p->index);
354 362
363 spin_unlock_irqrestore(&dc->lock, flags);
364
355 return 0; 365 return 0;
356} 366}
357 367
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
699 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); 709 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
700 unsigned int h_offset = 0, v_offset = 0; 710 unsigned int h_offset = 0, v_offset = 0;
701 struct tegra_bo_tiling tiling; 711 struct tegra_bo_tiling tiling;
712 unsigned long value, flags;
702 unsigned int format, swap; 713 unsigned int format, swap;
703 unsigned long value;
704 int err; 714 int err;
705 715
706 err = tegra_fb_get_tiling(fb, &tiling); 716 err = tegra_fb_get_tiling(fb, &tiling);
707 if (err < 0) 717 if (err < 0)
708 return err; 718 return err;
709 719
720 spin_lock_irqsave(&dc->lock, flags);
721
710 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER); 722 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
711 723
712 value = fb->offsets[0] + y * fb->pitches[0] + 724 value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
752 764
753 case TEGRA_BO_TILING_MODE_BLOCK: 765 case TEGRA_BO_TILING_MODE_BLOCK:
754 DRM_ERROR("hardware doesn't support block linear mode\n"); 766 DRM_ERROR("hardware doesn't support block linear mode\n");
767 spin_unlock_irqrestore(&dc->lock, flags);
755 return -EINVAL; 768 return -EINVAL;
756 } 769 }
757 770
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
778 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL); 791 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
779 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 792 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
780 793
794 spin_unlock_irqrestore(&dc->lock, flags);
795
781 return 0; 796 return 0;
782} 797}
783 798
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
814 unsigned long flags, base; 829 unsigned long flags, base;
815 struct tegra_bo *bo; 830 struct tegra_bo *bo;
816 831
817 if (!dc->event) 832 spin_lock_irqsave(&drm->event_lock, flags);
833
834 if (!dc->event) {
835 spin_unlock_irqrestore(&drm->event_lock, flags);
818 return; 836 return;
837 }
819 838
820 bo = tegra_fb_get_plane(crtc->primary->fb, 0); 839 bo = tegra_fb_get_plane(crtc->primary->fb, 0);
821 840
841 spin_lock_irqsave(&dc->lock, flags);
842
822 /* check if new start address has been latched */ 843 /* check if new start address has been latched */
844 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
823 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS); 845 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
824 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR); 846 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
825 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS); 847 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
826 848
849 spin_unlock_irqrestore(&dc->lock, flags);
850
827 if (base == bo->paddr + crtc->primary->fb->offsets[0]) { 851 if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
828 spin_lock_irqsave(&drm->event_lock, flags); 852 drm_crtc_send_vblank_event(crtc, dc->event);
829 drm_send_vblank_event(drm, dc->pipe, dc->event); 853 drm_crtc_vblank_put(crtc);
830 drm_vblank_put(drm, dc->pipe);
831 dc->event = NULL; 854 dc->event = NULL;
832 spin_unlock_irqrestore(&drm->event_lock, flags);
833 } 855 }
856
857 spin_unlock_irqrestore(&drm->event_lock, flags);
834} 858}
835 859
836void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) 860void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
843 867
844 if (dc->event && dc->event->base.file_priv == file) { 868 if (dc->event && dc->event->base.file_priv == file) {
845 dc->event->base.destroy(&dc->event->base); 869 dc->event->base.destroy(&dc->event->base);
846 drm_vblank_put(drm, dc->pipe); 870 drm_crtc_vblank_put(crtc);
847 dc->event = NULL; 871 dc->event = NULL;
848 } 872 }
849 873
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
853static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 877static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
854 struct drm_pending_vblank_event *event, uint32_t page_flip_flags) 878 struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
855{ 879{
880 unsigned int pipe = drm_crtc_index(crtc);
856 struct tegra_dc *dc = to_tegra_dc(crtc); 881 struct tegra_dc *dc = to_tegra_dc(crtc);
857 struct drm_device *drm = crtc->dev;
858 882
859 if (dc->event) 883 if (dc->event)
860 return -EBUSY; 884 return -EBUSY;
861 885
862 if (event) { 886 if (event) {
863 event->pipe = dc->pipe; 887 event->pipe = pipe;
864 dc->event = event; 888 dc->event = event;
865 drm_vblank_get(drm, dc->pipe); 889 drm_crtc_vblank_get(crtc);
866 } 890 }
867 891
868 tegra_dc_set_base(dc, 0, 0, fb); 892 tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
1127 /* 1151 /*
1128 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); 1152 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
1129 */ 1153 */
1130 drm_handle_vblank(dc->base.dev, dc->pipe); 1154 drm_crtc_handle_vblank(&dc->base);
1131 tegra_dc_finish_page_flip(dc); 1155 tegra_dc_finish_page_flip(dc);
1132 } 1156 }
1133 1157
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afeece1f..d4f827593dfa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
694 .llseek = noop_llseek, 694 .llseek = noop_llseek,
695}; 695};
696 696
697static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe) 697static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
698 unsigned int pipe)
698{ 699{
699 struct drm_crtc *crtc; 700 struct drm_crtc *crtc;
700 701
701 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) { 702 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
702 struct tegra_dc *dc = to_tegra_dc(crtc); 703 if (pipe == drm_crtc_index(crtc))
703
704 if (dc->pipe == pipe)
705 return crtc; 704 return crtc;
706 } 705 }
707 706
708 return NULL; 707 return NULL;
709} 708}
710 709
711static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc) 710static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
712{ 711{
712 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
713
714 if (!crtc)
715 return 0;
716
713 /* TODO: implement real hardware counter using syncpoints */ 717 /* TODO: implement real hardware counter using syncpoints */
714 return drm_vblank_count(dev, crtc); 718 return drm_crtc_vblank_count(crtc);
715} 719}
716 720
717static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe) 721static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
216 } 216 }
217} 217}
218 218
219static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo, 219static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
220 size_t size)
221{ 220{
221 struct scatterlist *s;
222 struct sg_table *sgt;
223 unsigned int i;
224
222 bo->pages = drm_gem_get_pages(&bo->gem); 225 bo->pages = drm_gem_get_pages(&bo->gem);
223 if (IS_ERR(bo->pages)) 226 if (IS_ERR(bo->pages))
224 return PTR_ERR(bo->pages); 227 return PTR_ERR(bo->pages);
225 228
226 bo->num_pages = size >> PAGE_SHIFT; 229 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
227 230
228 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); 231 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
229 if (IS_ERR(bo->sgt)) { 232 if (IS_ERR(sgt))
230 drm_gem_put_pages(&bo->gem, bo->pages, false, false); 233 goto put_pages;
231 return PTR_ERR(bo->sgt); 234
235 /*
236 * Fake up the SG table so that dma_map_sg() can be used to flush the
237 * pages associated with it. Note that this relies on the fact that
238 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
239 * only cache maintenance.
240 *
241 * TODO: Replace this by drm_clflash_sg() once it can be implemented
242 * without relying on symbols that are not exported.
243 */
244 for_each_sg(sgt->sgl, s, sgt->nents, i)
245 sg_dma_address(s) = sg_phys(s);
246
247 if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
248 sgt = ERR_PTR(-ENOMEM);
249 goto release_sgt;
232 } 250 }
233 251
252 bo->sgt = sgt;
253
234 return 0; 254 return 0;
255
256release_sgt:
257 sg_free_table(sgt);
258 kfree(sgt);
259put_pages:
260 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
261 return PTR_ERR(sgt);
235} 262}
236 263
237static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo, 264static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
238 size_t size)
239{ 265{
240 struct tegra_drm *tegra = drm->dev_private; 266 struct tegra_drm *tegra = drm->dev_private;
241 int err; 267 int err;
242 268
243 if (tegra->domain) { 269 if (tegra->domain) {
244 err = tegra_bo_get_pages(drm, bo, size); 270 err = tegra_bo_get_pages(drm, bo);
245 if (err < 0) 271 if (err < 0)
246 return err; 272 return err;
247 273
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
251 return err; 277 return err;
252 } 278 }
253 } else { 279 } else {
280 size_t size = bo->gem.size;
281
254 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, 282 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
255 GFP_KERNEL | __GFP_NOWARN); 283 GFP_KERNEL | __GFP_NOWARN);
256 if (!bo->vaddr) { 284 if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
274 if (IS_ERR(bo)) 302 if (IS_ERR(bo))
275 return bo; 303 return bo;
276 304
277 err = tegra_bo_alloc(drm, bo, size); 305 err = tegra_bo_alloc(drm, bo);
278 if (err < 0) 306 if (err < 0)
279 goto release; 307 goto release;
280 308
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891bd321a..4f361b77c749 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
264 264
265 if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) && 265 if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
266 inode->i_sb->s_root != NULL && 266 inode->i_sb->s_root != NULL &&
267 is_root_inode(inode)) 267 !is_root_inode(inode))
268 ll_invalidate_aliases(inode); 268 ll_invalidate_aliases(inode);
269 269
270 iput(inode); 270 iput(inode);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 8d7fc48b1f30..29fa1c3d0089 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -46,6 +46,8 @@ static struct console usbcons;
46 * ------------------------------------------------------------ 46 * ------------------------------------------------------------
47 */ 47 */
48 48
49static const struct tty_operations usb_console_fake_tty_ops = {
50};
49 51
50/* 52/*
51 * The parsing of the command line works exactly like the 53 * The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
137 goto reset_open_count; 139 goto reset_open_count;
138 } 140 }
139 kref_init(&tty->kref); 141 kref_init(&tty->kref);
140 tty_port_tty_set(&port->port, tty);
141 tty->driver = usb_serial_tty_driver; 142 tty->driver = usb_serial_tty_driver;
142 tty->index = co->index; 143 tty->index = co->index;
144 init_ldsem(&tty->ldisc_sem);
145 INIT_LIST_HEAD(&tty->tty_files);
146 kref_get(&tty->driver->kref);
147 tty->ops = &usb_console_fake_tty_ops;
143 if (tty_init_termios(tty)) { 148 if (tty_init_termios(tty)) {
144 retval = -ENOMEM; 149 retval = -ENOMEM;
145 goto free_tty; 150 goto put_tty;
146 } 151 }
152 tty_port_tty_set(&port->port, tty);
147 } 153 }
148 154
149 /* only call the device specific open if this 155 /* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
161 serial->type->set_termios(tty, port, &dummy); 167 serial->type->set_termios(tty, port, &dummy);
162 168
163 tty_port_tty_set(&port->port, NULL); 169 tty_port_tty_set(&port->port, NULL);
164 kfree(tty); 170 tty_kref_put(tty);
165 } 171 }
166 set_bit(ASYNCB_INITIALIZED, &port->port.flags); 172 set_bit(ASYNCB_INITIALIZED, &port->port.flags);
167 } 173 }
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
177 183
178 fail: 184 fail:
179 tty_port_tty_set(&port->port, NULL); 185 tty_port_tty_set(&port->port, NULL);
180 free_tty: 186 put_tty:
181 kfree(tty); 187 tty_kref_put(tty);
182 reset_open_count: 188 reset_open_count:
183 port->port.count = 0; 189 port->port.count = 0;
184 usb_autopm_put_interface(serial->interface); 190 usb_autopm_put_interface(serial->interface);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6c4eb3cf5efd..f4c56fc1a9f6 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
123 { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ 123 { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
124 { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
124 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 125 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
125 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 126 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
126 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 127 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
128 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
127 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ 129 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
128 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ 130 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
129 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ 131 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1bd192290b08..ccf1df7c4b80 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
286 286
287 res = usb_submit_urb(port->read_urbs[index], mem_flags); 287 res = usb_submit_urb(port->read_urbs[index], mem_flags);
288 if (res) { 288 if (res) {
289 if (res != -EPERM) { 289 if (res != -EPERM && res != -ENODEV) {
290 dev_err(&port->dev, 290 dev_err(&port->dev,
291 "%s - usb_submit_urb failed: %d\n", 291 "%s - usb_submit_urb failed: %d\n",
292 __func__, res); 292 __func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
373 __func__, urb->status); 373 __func__, urb->status);
374 return; 374 return;
375 default: 375 default:
376 dev_err(&port->dev, "%s - nonzero urb status: %d\n", 376 dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
377 __func__, urb->status); 377 __func__, urb->status);
378 goto resubmit; 378 goto resubmit;
379 } 379 }
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 077c714f1285..e07b15ed5814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
410 } 410 }
411 port = serial->port[msg->port]; 411 port = serial->port[msg->port];
412 p_priv = usb_get_serial_port_data(port); 412 p_priv = usb_get_serial_port_data(port);
413 if (!p_priv)
414 goto resubmit;
413 415
414 /* Update handshaking pin state information */ 416 /* Update handshaking pin state information */
415 old_dcd_state = p_priv->dcd_state; 417 old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
420 422
421 if (old_dcd_state != p_priv->dcd_state) 423 if (old_dcd_state != p_priv->dcd_state)
422 tty_port_tty_hangup(&port->port, true); 424 tty_port_tty_hangup(&port->port, true);
423 425resubmit:
424 /* Resubmit urb so we continue receiving */ 426 /* Resubmit urb so we continue receiving */
425 err = usb_submit_urb(urb, GFP_ATOMIC); 427 err = usb_submit_urb(urb, GFP_ATOMIC);
426 if (err != 0) 428 if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
527 } 529 }
528 port = serial->port[msg->port]; 530 port = serial->port[msg->port];
529 p_priv = usb_get_serial_port_data(port); 531 p_priv = usb_get_serial_port_data(port);
532 if (!p_priv)
533 goto resubmit;
530 534
531 /* Update handshaking pin state information */ 535 /* Update handshaking pin state information */
532 old_dcd_state = p_priv->dcd_state; 536 old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
537 541
538 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 542 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
539 tty_port_tty_hangup(&port->port, true); 543 tty_port_tty_hangup(&port->port, true);
540 544resubmit:
541 /* Resubmit urb so we continue receiving */ 545 /* Resubmit urb so we continue receiving */
542 err = usb_submit_urb(urb, GFP_ATOMIC); 546 err = usb_submit_urb(urb, GFP_ATOMIC);
543 if (err != 0) 547 if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
607 } 611 }
608 port = serial->port[msg->portNumber]; 612 port = serial->port[msg->portNumber];
609 p_priv = usb_get_serial_port_data(port); 613 p_priv = usb_get_serial_port_data(port);
614 if (!p_priv)
615 goto resubmit;
610 616
611 /* Update handshaking pin state information */ 617 /* Update handshaking pin state information */
612 old_dcd_state = p_priv->dcd_state; 618 old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
617 623
618 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 624 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
619 tty_port_tty_hangup(&port->port, true); 625 tty_port_tty_hangup(&port->port, true);
620 626resubmit:
621 /* Resubmit urb so we continue receiving */ 627 /* Resubmit urb so we continue receiving */
622 err = usb_submit_urb(urb, GFP_ATOMIC); 628 err = usb_submit_urb(urb, GFP_ATOMIC);
623 if (err != 0) 629 if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
855 861
856 port = serial->port[0]; 862 port = serial->port[0];
857 p_priv = usb_get_serial_port_data(port); 863 p_priv = usb_get_serial_port_data(port);
864 if (!p_priv)
865 goto resubmit;
858 866
859 /* Update handshaking pin state information */ 867 /* Update handshaking pin state information */
860 old_dcd_state = p_priv->dcd_state; 868 old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
865 873
866 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 874 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
867 tty_port_tty_hangup(&port->port, true); 875 tty_port_tty_hangup(&port->port, true);
868 876resubmit:
869 /* Resubmit urb so we continue receiving */ 877 /* Resubmit urb so we continue receiving */
870 err = usb_submit_urb(urb, GFP_ATOMIC); 878 err = usb_submit_urb(urb, GFP_ATOMIC);
871 if (err != 0) 879 if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
926 934
927 port = serial->port[msg->port]; 935 port = serial->port[msg->port];
928 p_priv = usb_get_serial_port_data(port); 936 p_priv = usb_get_serial_port_data(port);
937 if (!p_priv)
938 goto resubmit;
929 939
930 /* Update handshaking pin state information */ 940 /* Update handshaking pin state information */
931 old_dcd_state = p_priv->dcd_state; 941 old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
934 944
935 if (old_dcd_state != p_priv->dcd_state && old_dcd_state) 945 if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
936 tty_port_tty_hangup(&port->port, true); 946 tty_port_tty_hangup(&port->port, true);
937 947resubmit:
938 /* Resubmit urb so we continue receiving */ 948 /* Resubmit urb so we continue receiving */
939 err = usb_submit_urb(urb, GFP_ATOMIC); 949 err = usb_submit_urb(urb, GFP_ATOMIC);
940 if (err != 0) 950 if (err != 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7a4c21b4f676..efdcee15b520 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
234 234
235#define QUALCOMM_VENDOR_ID 0x05C6 235#define QUALCOMM_VENDOR_ID 0x05C6
236 236
237#define SIERRA_VENDOR_ID 0x1199
238
237#define CMOTECH_VENDOR_ID 0x16d8 239#define CMOTECH_VENDOR_ID 0x16d8
238#define CMOTECH_PRODUCT_6001 0x6001 240#define CMOTECH_PRODUCT_6001 0x6001
239#define CMOTECH_PRODUCT_CMU_300 0x6002 241#define CMOTECH_PRODUCT_CMU_300 0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
512 OPTION_BLACKLIST_RESERVED_IF = 2 514 OPTION_BLACKLIST_RESERVED_IF = 2
513}; 515};
514 516
515#define MAX_BL_NUM 8 517#define MAX_BL_NUM 11
516struct option_blacklist_info { 518struct option_blacklist_info {
517 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */ 519 /* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
518 const unsigned long sendsetup; 520 const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
601 .reserved = BIT(1) | BIT(5), 603 .reserved = BIT(1) | BIT(5),
602}; 604};
603 605
606static const struct option_blacklist_info sierra_mc73xx_blacklist = {
607 .sendsetup = BIT(0) | BIT(2),
608 .reserved = BIT(8) | BIT(10) | BIT(11),
609};
610
604static const struct usb_device_id option_ids[] = { 611static const struct usb_device_id option_ids[] = {
605 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 612 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
606 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 613 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
1098 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1105 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1106 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1100 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1107 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1108 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
1109 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1101 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1110 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1111 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1112 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cb3e14780a7e..9c63897b3a56 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
142 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ 142 {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
143 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ 143 {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
144 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ 144 {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
145 {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
146 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
147 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
148 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */