author		Jani Nikula <jani.nikula@intel.com>	2017-01-10 03:10:22 -0500
committer	Jani Nikula <jani.nikula@intel.com>	2017-01-10 03:10:31 -0500
commit		79b11b6437bd31b315f5fda72fe1a00baf98e804 (patch)
tree		eb54c123e2c0c4257ee5b9cc0fe5e8e580afa66d
parent		a121103c922847ba5010819a3f250f1f7fc84ab8 (diff)
parent		9631739f8196ec80b5d9bf955f79b711490c0205 (diff)
Merge tag 'gvt-fixes-2017-01-10' of https://github.com/01org/gvt-linux into drm-intel-fixes
GVT-g fixes from Zhenya, "Please pull GVT-g device model fixes for rc4. This is based on rc3 with new vfio/mdev interface change."

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
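Most of the fixes below share two themes: allocations move from GFP_ATOMIC to GFP_KERNEL in paths that are allowed to sleep, and the open-coded 64-bit MMIO fallback in read_pte64()/write_pte64() is dropped in favor of plain readq()/writeq(), which are always available on the 64-bit kernels GVT-g builds for. For background, the removed fallback assembled a 64-bit access from two 32-bit halves, in the spirit of the kernel's io-64-nonatomic-lo-hi.h helpers. A minimal standalone sketch of that pattern (illustrative userspace C, little-endian assumed, not the driver's code):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for ioread32(): a plain 32-bit load from a buffer. */
	static uint32_t read32(const void *addr)
	{
		return *(const volatile uint32_t *)addr;
	}

	/* The removed fallback: assemble a 64-bit PTE from two 32-bit
	 * reads, low dword first, as the old read_pte64() did on
	 * platforms without readq. */
	static uint64_t read64_split(const void *addr)
	{
		uint64_t pte = read32(addr);

		pte |= (uint64_t)read32((const uint8_t *)addr + 4) << 32;
		return pte;
	}

	int main(void)
	{
		uint64_t pte = 0x123456789abcdef0ULL;

		printf("0x%llx\n", (unsigned long long)read64_split(&pte));
		return 0;
	}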
-rw-r--r--	drivers/gpu/drm/i915/gvt/aperture_gm.c	7
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c		54
-rw-r--r--	drivers/gpu/drm/i915/gvt/gvt.c		8
-rw-r--r--	drivers/gpu/drm/i915/gvt/handlers.c	13
-rw-r--r--	drivers/gpu/drm/i915/gvt/kvmgt.c	14
-rw-r--r--	drivers/gpu/drm/i915/gvt/mmio.c		31
-rw-r--r--	drivers/gpu/drm/i915/gvt/opregion.c	8
-rw-r--r--	drivers/gpu/drm/i915/gvt/reg.h		3
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	14
-rw-r--r--	drivers/gpu/drm/i915/gvt/vgpu.c		8
10 files changed, 73 insertions(+), 87 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc4aea6..65200313515c 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
 #include "i915_drv.h"
 #include "gvt.h"
 
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6c5fdf5b2ce2..3cf0df0bb391 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-	u64 pte;
 
-#ifdef readq
-	pte = readq(addr);
-#else
-	pte = ioread32(addr);
-	pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-	return pte;
+	return readq(addr);
 }
 
 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
 	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
-#ifdef writeq
 	writeq(pte, addr);
-#else
-	iowrite32((u32)pte, addr);
-	iowrite32(pte >> 32, addr + 4);
-#endif
+
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
 		info->gtt_entry_size;
 	mem = kzalloc(mm->has_shadow_page_table ?
 		mm->page_table_entry_size * 2
-			: mm->page_table_entry_size,
-			GFP_ATOMIC);
+			: mm->page_table_entry_size, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 	mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 	struct intel_vgpu_mm *mm;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
 	if (!mm) {
 		ret = -ENOMEM;
 		goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 	int page_entry_num = GTT_PAGE_SIZE >>
 		vgpu->gvt->device_info.gtt_entry_size_shift;
-	struct page *scratch_pt;
+	void *scratch_pt;
 	unsigned long mfn;
 	int i;
-	void *p;
 
 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
 
-	scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!scratch_pt) {
 		gvt_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}
 
-	p = kmap_atomic(scratch_pt);
-	mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-		kunmap_atomic(p);
-		__free_page(scratch_pt);
+		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+		free_page((unsigned long)scratch_pt);
 		return -EFAULT;
 	}
 	gtt->scratch_pt[type].page_mfn = mfn;
-	gtt->scratch_pt[type].page = scratch_pt;
+	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
 			vgpu->id, type, mfn);
 
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	 * scratch_pt[type] indicate the scratch pt/scratch page used by the
 	 * 'type' pt.
 	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+	 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
 	 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
 	 */
 	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		se.val64 |= PPAT_CACHED_INDEX;
 
 		for (i = 0; i < page_entry_num; i++)
-			ops->set_entry(p, &se, i, false, 0, vgpu);
+			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
 	}
 
-	kunmap_atomic(p);
-
 	return 0;
 }
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
-	void *page_addr;
+	void *page;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
-	gvt->gtt.scratch_ggtt_page =
-		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-	if (!gvt->gtt.scratch_ggtt_page) {
+	page = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!page) {
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-	gvt->gtt.scratch_ggtt_mfn =
-		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
 	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
 		gvt_err("fail to translate scratch ggtt page\n");
 		__free_page(gvt->gtt.scratch_ggtt_page);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c3d2fd..e6bf5c533fbe 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
 	intel_gvt_clean_vgpu_types(gvt);
 
+	idr_destroy(&gvt->vgpu_idr);
+
 	kfree(dev_priv->gvt);
 	dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
 	gvt_dbg_core("init gvt device\n");
 
+	idr_init(&gvt->vgpu_idr);
+
 	mutex_init(&gvt->lock);
 	gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
 	ret = intel_gvt_setup_mmio_info(gvt);
 	if (ret)
-		return ret;
+		goto out_clean_idr;
 
 	ret = intel_gvt_load_firmware(gvt);
 	if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
 	intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
 	intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+	idr_destroy(&gvt->vgpu_idr);
 	kfree(gvt);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 522809710312..8cbaf1c83720 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
 		u32 offset, u32 flags, u32 size,
 		u32 addr_mask, u32 ro_mask, u32 device,
-		void *read, void *write)
+		int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+		int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
 	struct intel_gvt_mmio_info *info, *p;
 	u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 		default:
 			/*should not hit here*/
 			gvt_err("invalid forcewake offset 0x%x\n", offset);
-			return 1;
+			return -EINVAL;
 		}
 	} else {
 		ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -974,7 +975,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	u32 data;
@@ -1366,7 +1367,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	int rc = 0;
 	unsigned int id = 0;
 
 	write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1389,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
 		id = VECS;
 		break;
 	default:
-		rc = -EINVAL;
-		break;
+		return -EINVAL;
 	}
 	set_bit(id, (void *)vgpu->tlb_handle_pending);
 
-	return rc;
+	return 0;
 }
 
 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faaae07ae487..0c9234a87a20 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	struct intel_vgpu_type *type;
 	struct device *pdev;
 	void *gvt;
+	int ret;
 
 	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	if (!type) {
 		gvt_err("failed to find type %s to create\n",
 						kobject_name(kobj));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
 	if (IS_ERR_OR_NULL(vgpu)) {
-		gvt_err("create intel vgpu failed\n");
-		return -EINVAL;
+		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+		gvt_err("failed to create intel vgpu: %d\n", ret);
+		goto out;
 	}
 
 	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
 		     dev_name(mdev_dev(mdev)));
-	return 0;
+	ret = 0;
+
+out:
+	return ret;
 }
 
 static int intel_vgpu_remove(struct mdev_device *mdev)
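The kvmgt.c change above stops collapsing every failure to -EINVAL and instead propagates the error already encoded in the pointer returned by vgpu_create(). For background, the kernel's ERR_PTR convention stores a small negative errno in a pointer value that can never be a valid address; a simplified standalone sketch of the idea (illustrative only, not the kernel's <linux/err.h>):

	#include <stdio.h>

	/* Simplified ERR_PTR/PTR_ERR: pack a negative errno into a pointer
	 * in the top MAX_ERRNO values of the address space. */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
	}

	/* Hypothetical stand-in for intel_gvt_ops->vgpu_create(). */
	static void *vgpu_create(int fail)
	{
		static int dummy;

		if (fail)
			return ERR_PTR(-22);	/* -EINVAL */
		return &dummy;
	}

	int main(void)
	{
		void *vgpu = vgpu_create(1);

		if (IS_ERR(vgpu))
			printf("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
		return 0;
	}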
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450a1946..e60701397ac2 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
 		goto err;
 
-	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-			vgpu->id, offset, bytes, *(u32 *)p_data);
-
-		if (offset == 0x206c) {
-			gvt_err("------------------------------------------\n");
-			gvt_err("vgpu%d: likely triggers a gfx reset\n",
-				vgpu->id);
-			gvt_err("------------------------------------------\n");
-			vgpu->mmio.disable_warn_untrack = true;
-		}
-	}
-
 	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
 		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
 			goto err;
 	}
 
+	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
 	if (mmio) {
 		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
 			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 				goto err;
 		}
 		ret = mmio->read(vgpu, offset, p_data, bytes);
-	} else
+	} else {
 		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
+		if (!vgpu->mmio.disable_warn_untrack) {
+			gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+				vgpu->id, offset, bytes, *(u32 *)p_data);
+
+			if (offset == 0x206c) {
+				gvt_err("------------------------------------------\n");
+				gvt_err("vgpu%d: likely triggers a gfx reset\n",
+					vgpu->id);
+				gvt_err("------------------------------------------\n");
+				vgpu->mmio.disable_warn_untrack = true;
+			}
+		}
+	}
+
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 81cd921770c6..d9fb41ab7119 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
 			vgpu->id))
 		return -EINVAL;
 
-	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-			GFP_DMA32 | __GFP_ZERO,
-			INTEL_GVT_OPREGION_PORDER);
+	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+			__GFP_ZERO,
+			get_order(INTEL_GVT_OPREGION_SIZE));
 
 	if (!vgpu_opregion(vgpu)->va)
 		return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
 		map_vgpu_opregion(vgpu, false);
 		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-				INTEL_GVT_OPREGION_PORDER);
+				get_order(INTEL_GVT_OPREGION_SIZE));
 
 		vgpu_opregion(vgpu)->va = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789d8f02..fbd023a16f18 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM 0x204
 
 #define INTEL_GVT_OPREGION_PAGES	2
-#define INTEL_GVT_OPREGION_PORDER	1
-#define INTEL_GVT_OPREGION_SIZE	(2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE	(INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
 
 #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db242250235..e91885dffeff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload;
+	struct intel_vgpu *vgpu;
 	int event;
 
 	mutex_lock(&gvt->lock);
 
 	workload = scheduler->current_workload[ring_id];
+	vgpu = workload->vgpu;
 
-	if (!workload->status && !workload->vgpu->resetting) {
+	if (!workload->status && !vgpu->resetting) {
 		wait_event(workload->shadow_ctx_status_wq,
 			!atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 		for_each_set_bit(event, workload->pending_events,
 				INTEL_GVT_EVENT_MAX)
-			intel_vgpu_trigger_virtual_event(workload->vgpu,
-					event);
+			intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	scheduler->current_workload[ring_id] = NULL;
 
-	atomic_dec(&workload->vgpu->running_workload_num);
-
 	list_del_init(&workload->list);
 	workload->complete(workload);
 
+	atomic_dec(&vgpu->running_workload_num);
 	wake_up(&scheduler->workload_complete_wq);
 	mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
 	gvt_dbg_sched("will complete workload %p\n, status: %d\n",
 			workload, workload->status);
 
-	complete_current_workload(gvt, ring_id);
-
 	if (workload->req)
 		i915_gem_request_put(fetch_and_zero(&workload->req));
 
+	complete_current_workload(gvt, ring_id);
+
 	if (need_force_wake)
 		intel_uncore_forcewake_put(gvt->dev_priv,
 					FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9d5777..f0e86123e45b 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -177,7 +177,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 		if (low_avail / min_low == 0)
 			break;
 		gvt->types[i].low_gm_size = min_low;
-		gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
 		gvt->types[i].fence = 4;
 		gvt->types[i].max_instance = low_avail / min_low;
 		gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +217,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 	 */
 	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
 		gvt->gm.vgpu_allocated_low_gm_size;
-	high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
 		gvt->gm.vgpu_allocated_high_gm_size;
 	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
 		gvt->fence.vgpu_allocated_fence_num;
@@ -304,7 +304,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 
 	ret = setup_vgpu_mmio(vgpu);
 	if (ret)
-		goto out_free_vgpu;
+		goto out_clean_idr;
 
 	ret = intel_vgpu_alloc_resource(vgpu, param);
 	if (ret)
@@ -355,6 +355,8 @@ out_clean_vgpu_resource:
 	intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
 	clean_vgpu_mmio(vgpu);
+out_clean_idr:
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
 	vfree(vgpu);
 	mutex_unlock(&gvt->lock);
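The error paths added in gvt.c and vgpu.c above follow the kernel's goto-unwind idiom: each new idr_init()/idr_alloc() gains a matching idr_destroy()/idr_remove() on every exit path, released in reverse order of setup. A minimal standalone sketch of the idiom (hypothetical setup/teardown names, not the driver's code):

	#include <stdio.h>

	/* Hypothetical stand-ins for the real resources (idr slot, vgpu MMIO). */
	static int setup_a(void) { return 0; }            /* e.g. idr_alloc() */
	static void teardown_a(void) { puts("undo a"); }  /* e.g. idr_remove() */
	static int setup_b(void) { return -1; }           /* e.g. setup_vgpu_mmio(), failing */

	/* Each failure jumps to the label that releases exactly what was
	 * already set up, in reverse order. */
	static int create(void)
	{
		int ret;

		ret = setup_a();
		if (ret)
			goto out;

		ret = setup_b();
		if (ret)
			goto out_teardown_a;	/* without this, the idr slot would leak */

		return 0;

	out_teardown_a:
		teardown_a();
	out:
		return ret;
	}

	int main(void)
	{
		printf("create() = %d\n", create());
		return 0;
	}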