about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/gvt/gtt.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 115
1 file changed, 58 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2402395a068d..58e166effa45 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1905 vgpu_free_mm(mm); 1905 vgpu_free_mm(mm);
1906 return ERR_PTR(-ENOMEM); 1906 return ERR_PTR(-ENOMEM);
1907 } 1907 }
1908 mm->ggtt_mm.last_partial_off = -1UL;
1909 1908
1910 return mm; 1909 return mm;
1911} 1910}
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1930 invalidate_ppgtt_mm(mm); 1929 invalidate_ppgtt_mm(mm);
1931 } else { 1930 } else {
1932 vfree(mm->ggtt_mm.virtual_ggtt); 1931 vfree(mm->ggtt_mm.virtual_ggtt);
1933 mm->ggtt_mm.last_partial_off = -1UL;
1934 } 1932 }
1935 1933
1936 vgpu_free_mm(mm); 1934 vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2168 struct intel_gvt_gtt_entry e, m; 2166 struct intel_gvt_gtt_entry e, m;
2169 dma_addr_t dma_addr; 2167 dma_addr_t dma_addr;
2170 int ret; 2168 int ret;
2169 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2170 bool partial_update = false;
2171 2171
2172 if (bytes != 4 && bytes != 8) 2172 if (bytes != 4 && bytes != 8)
2173 return -EINVAL; 2173 return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2178 if (!vgpu_gmadr_is_valid(vgpu, gma)) 2178 if (!vgpu_gmadr_is_valid(vgpu, gma))
2179 return 0; 2179 return 0;
2180 2180
2181 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 2181 e.type = GTT_TYPE_GGTT_PTE;
2182
2183 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 2182 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2184 bytes); 2183 bytes);
2185 2184
2186 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes 2185 /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
2187 * write, we assume the two 4 bytes writes are consecutive. 2186 * write, save the first 4 bytes in a list and update virtual
2188 * Otherwise, we abort and report error 2187 * PTE. Only update shadow PTE when the second 4 bytes comes.
2189 */ 2188 */
2190 if (bytes < info->gtt_entry_size) { 2189 if (bytes < info->gtt_entry_size) {
2191 if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { 2190 bool found = false;
2192 /* the first partial part*/ 2191
2193 ggtt_mm->ggtt_mm.last_partial_off = off; 2192 list_for_each_entry_safe(pos, n,
2194 ggtt_mm->ggtt_mm.last_partial_data = e.val64; 2193 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2195 return 0; 2194 if (g_gtt_index == pos->offset >>
2196 } else if ((g_gtt_index == 2195 info->gtt_entry_size_shift) {
2197 (ggtt_mm->ggtt_mm.last_partial_off >> 2196 if (off != pos->offset) {
2198 info->gtt_entry_size_shift)) && 2197 /* the second partial part*/
2199 (off != ggtt_mm->ggtt_mm.last_partial_off)) { 2198 int last_off = pos->offset &
2200 /* the second partial part */ 2199 (info->gtt_entry_size - 1);
2201 2200
2202 int last_off = ggtt_mm->ggtt_mm.last_partial_off & 2201 memcpy((void *)&e.val64 + last_off,
2203 (info->gtt_entry_size - 1); 2202 (void *)&pos->data + last_off,
2204 2203 bytes);
2205 memcpy((void *)&e.val64 + last_off, 2204
2206 (void *)&ggtt_mm->ggtt_mm.last_partial_data + 2205 list_del(&pos->list);
2207 last_off, bytes); 2206 kfree(pos);
2208 2207 found = true;
2209 ggtt_mm->ggtt_mm.last_partial_off = -1UL; 2208 break;
2210 } else { 2209 }
2211 int last_offset; 2210
2212 2211 /* update of the first partial part */
2213 gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", 2212 pos->data = e.val64;
2214 ggtt_mm->ggtt_mm.last_partial_off, off, 2213 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2215 bytes, info->gtt_entry_size); 2214 return 0;
2216 2215 }
2217 /* set host ggtt entry to scratch page and clear 2216 }
2218 * virtual ggtt entry as not present for last
2219 * partially write offset
2220 */
2221 last_offset = ggtt_mm->ggtt_mm.last_partial_off &
2222 (~(info->gtt_entry_size - 1));
2223
2224 ggtt_get_host_entry(ggtt_mm, &m, last_offset);
2225 ggtt_invalidate_pte(vgpu, &m);
2226 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2227 ops->clear_present(&m);
2228 ggtt_set_host_entry(ggtt_mm, &m, last_offset);
2229 ggtt_invalidate(gvt->dev_priv);
2230
2231 ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
2232 ops->clear_present(&e);
2233 ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
2234
2235 ggtt_mm->ggtt_mm.last_partial_off = off;
2236 ggtt_mm->ggtt_mm.last_partial_data = e.val64;
2237 2217
2238 return 0; 2218 if (!found) {
2219 /* the first partial part */
2220 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2221 if (!partial_pte)
2222 return -ENOMEM;
2223 partial_pte->offset = off;
2224 partial_pte->data = e.val64;
2225 list_add_tail(&partial_pte->list,
2226 &ggtt_mm->ggtt_mm.partial_pte_list);
2227 partial_update = true;
2239 } 2228 }
2240 } 2229 }
2241 2230
2242 if (ops->test_present(&e)) { 2231 if (!partial_update && (ops->test_present(&e))) {
2243 gfn = ops->get_pfn(&e); 2232 gfn = ops->get_pfn(&e);
2244 m = e; 2233 m = e;
2245 2234
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2263 } else 2252 } else
2264 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 2253 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2265 } else { 2254 } else {
2266 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
2267 ggtt_invalidate_pte(vgpu, &m);
2268 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 2255 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2269 ops->clear_present(&m); 2256 ops->clear_present(&m);
2270 } 2257 }
2271 2258
2272out: 2259out:
2260 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2261
2262 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2263 ggtt_invalidate_pte(vgpu, &e);
2264
2273 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 2265 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2274 ggtt_invalidate(gvt->dev_priv); 2266 ggtt_invalidate(gvt->dev_priv);
2275 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2276 return 0; 2267 return 0;
2277} 2268}
2278 2269
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2430 2421
2431 intel_vgpu_reset_ggtt(vgpu, false); 2422 intel_vgpu_reset_ggtt(vgpu, false);
2432 2423
2424 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2425
2433 return create_scratch_page_tree(vgpu); 2426 return create_scratch_page_tree(vgpu);
2434} 2427}
2435 2428
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2454 2447
2455static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) 2448static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2456{ 2449{
2450 struct intel_gvt_partial_pte *pos;
2451
2452 list_for_each_entry(pos,
2453 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
2454 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2455 pos->offset, pos->data);
2456 kfree(pos);
2457 }
2457 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); 2458 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2458 vgpu->gtt.ggtt_mm = NULL; 2459 vgpu->gtt.ggtt_mm = NULL;
2459} 2460}