46 files changed, 584 insertions(+), 520 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 26edd832c64e..2fd11b439ab3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4100,12 +4100,18 @@ F:	drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:	Gerd Hoffmann <kraxel@redhat.com>
-S:	Odd Fixes
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Obsolete
+W:	https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:	drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4298,7 +4304,10 @@ F:	Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+M:	Gerd Hoffmann <kraxel@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
+S:	Maintained
 F:	drivers/gpu/drm/qxl/
 F:	include/uapi/drm/qxl_drm.h
 
@@ -13092,6 +13101,7 @@ M:	David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
 L:	dri-devel@lists.freedesktop.org
 L:	virtualization@lists.linux-foundation.org
+T:	git git://git.kraxel.org/linux drm-qemu
 S:	Maintained
 F:	drivers/gpu/drm/virtio/
 F:	include/uapi/linux/virtio_gpu.h
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc71b998..ccb5e02e7b20 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 			       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v10_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v10_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v10_0_show_cursor(crtc);
 
 		dce_v10_0_lock_cursor(crtc, false);
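The same restructuring repeats in the three DCE variants that follow: previously CUR_SIZE was written only from the cursor_set2 path when the dimensions changed, so the move and reset paths could run against a stale size register. Folding the write into the *_cursor_move_locked() helper keeps position, hotspot, and size in step on every update. A condensed standalone sketch of the shared pattern, reusing identifiers from the hunks above (illustration only, not the full driver code; the function name here is invented):

/* Sketch: each locked cursor move refreshes all three cursor registers,
 * which is why the separate CUR_SIZE writes in the cursor_set2() and
 * cursor_reset() paths above become redundant and are removed. */
static int dce_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	/* clamping and hotspot math elided */
	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((amdgpu_crtc->cursor_width - 1) << 16) |
	       (amdgpu_crtc->cursor_height - 1));
	return 0;
}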
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 2006abbbfb62..a7af5b33a5e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 			       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v11_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v11_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					     amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v11_0_show_cursor(crtc);
 
 		dce_v11_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec630e8c..39df6a50637f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 	struct amdgpu_device *adev = crtc->dev->dev_private;
 	int xorigin = 0, yorigin = 0;
 
+	int w = amdgpu_crtc->cursor_width;
+
 	amdgpu_crtc->cursor_x = x;
 	amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 			       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v6_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v6_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v6_0_show_cursor(crtc);
 		dce_v6_0_lock_cursor(crtc, false);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe834a3c..28102bb1704d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
 	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
 	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+	       ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
 	return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 			       int32_t hot_y)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
 	struct amdgpu_bo *aobj;
 	int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	dce_v8_0_lock_cursor(crtc, true);
 
-	if (hot_x != amdgpu_crtc->cursor_hot_x ||
+	if (width != amdgpu_crtc->cursor_width ||
+	    height != amdgpu_crtc->cursor_height ||
+	    hot_x != amdgpu_crtc->cursor_hot_x ||
 	    hot_y != amdgpu_crtc->cursor_hot_y) {
 		int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
 		dce_v8_0_cursor_move_locked(crtc, x, y);
 
-		amdgpu_crtc->cursor_hot_x = hot_x;
-		amdgpu_crtc->cursor_hot_y = hot_y;
-	}
-
-	if (width != amdgpu_crtc->cursor_width ||
-	    height != amdgpu_crtc->cursor_height) {
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (width - 1) << 16 | (height - 1));
 		amdgpu_crtc->cursor_width = width;
 		amdgpu_crtc->cursor_height = height;
+		amdgpu_crtc->cursor_hot_x = hot_x;
+		amdgpu_crtc->cursor_hot_y = hot_y;
 	}
 
 	dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-	struct amdgpu_device *adev = crtc->dev->dev_private;
 
 	if (amdgpu_crtc->cursor_bo) {
 		dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 		dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
 					    amdgpu_crtc->cursor_y);
 
-		WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-		       (amdgpu_crtc->cursor_width - 1) << 16 |
-		       (amdgpu_crtc->cursor_height - 1));
-
 		dce_v8_0_show_cursor(crtc);
 
 		dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e63d4a..e2b0b1646f99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 	const char *chip_name;
 	char fw_name[30];
 	int err;
+	bool is_58_fw = false;
 
 	DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	/* this memory configuration requires special firmware */
+	if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+		is_58_fw = true;
+
+	if (is_58_fw)
+		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+	else
+		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	WREG32(mmVM_CONTEXT1_CNTL,
 	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
 	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+		gmc_v6_0_set_fault_enable_default(adev, false);
+	else
+		gmc_v6_0_set_fault_enable_default(adev, true);
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	else
+		return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
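The firmware selection added above keys on the top byte of MC_SEQ_MISC0; per the patch comment, boards whose memory configuration reads back as 0x58 need the dedicated si58_mc.bin image instead of the per-chip one. A standalone sketch of just the decode, with a hypothetical register value (plain user-space C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mc_seq_misc0 = 0x58000000;	/* hypothetical readback */
	/* 0xff000000 masks bits 31:24; the shift leaves just that byte */
	uint8_t mem_cfg = (mc_seq_misc0 & 0xff000000) >> 24;

	printf("memory config 0x%02x -> %s\n", mem_cfg,
	       mem_cfg == 0x58 ? "radeon/si58_mc.bin" : "radeon/<chip>_mc.bin");
	return 0;
}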
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 10bedfac27b8..6e150db8f380 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6817) ||
 		    (adev->pdev->device == 0x6806))
 			max_mclk = 120000;
-	} else if (adev->asic_type == CHIP_OLAND) {
-		if ((adev->pdev->revision == 0xC7) ||
-		    (adev->pdev->revision == 0x80) ||
-		    (adev->pdev->revision == 0x81) ||
-		    (adev->pdev->revision == 0x83) ||
-		    (adev->pdev->revision == 0x87) ||
-		    (adev->pdev->device == 0x6604) ||
-		    (adev->pdev->device == 0x6605)) {
-			max_sclk = 75000;
-			max_mclk = 80000;
-		}
 	} else if (adev->asic_type == CHIP_HAINAN) {
 		if ((adev->pdev->revision == 0x81) ||
 		    (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
 		    (adev->pdev->device == 0x6665) ||
 		    (adev->pdev->device == 0x6667)) {
 			max_sclk = 75000;
-			max_mclk = 80000;
 		}
 	}
 	/* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
 			 ((adev->pdev->device == 0x6660) ||
 			  (adev->pdev->device == 0x6663) ||
 			  (adev->pdev->device == 0x6665) ||
-			  (adev->pdev->device == 0x6667))) ||
-		    ((adev->pdev->revision == 0xc3) &&
-		     (adev->pdev->device == 0x6665)))
+			  (adev->pdev->device == 0x6667))))
 			chip_name = "hainan_k";
+		else if ((adev->pdev->revision == 0xc3) &&
+			 (adev->pdev->device == 0x6665))
+			chip_name = "banks_k_2";
 		else
 			chip_name = "hainan";
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4d862a..7fb9137dd89b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
 				enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+			     bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
 	return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+				 bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
 	uint32_t tmp;
 	int r;
 
-	uvd_v4_2_init_cg(adev);
-	uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+	uvd_v4_2_enable_mgcg(adev, true);
 	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
 	r = uvd_v4_2_start(adev);
 	if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	uint32_t rb_bufsz;
 	int i, j, r;
-
 	/* disable byte swapping */
 	u32 lmi_swap_cntl = 0;
 	u32 mp_swap_cntl = 0;
 
+	WREG32(mmUVD_CGC_GATE, 0);
+	uvd_v4_2_set_dcm(adev, true);
+
 	uvd_v4_2_mc_resume(adev);
 
 	/* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
 	/* Unstall UMC and register bus */
 	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+	uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
 	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-	bool hw_mode = true;
-
-	if (hw_mode) {
-		uvd_v4_2_set_dcm(adev, false);
-	} else {
-		u32 tmp = RREG32(mmUVD_CGC_CTRL);
-		tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-		WREG32(mmUVD_CGC_CTRL, tmp);
-	}
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
 					  enum amd_clockgating_state state)
 {
-	bool gate = false;
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-		return 0;
-
-	if (state == AMD_CG_STATE_GATE)
-		gate = true;
-
-	uvd_v4_2_enable_mgcg(adev, gate);
-
 	return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
 	 */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-		return 0;
-
 	if (state == AMD_PG_STATE_GATE) {
 		uvd_v4_2_stop(adev);
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 5fb0b7f5c065..37ca685e5a9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK	0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0	0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1	0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2	0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02
 
 #define VCE_V3_0_FW_SIZE	(384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3	((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+					| GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 	WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
 	data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-	data &= ~0xffc00000;
+	data &= ~0x3ff;
 	WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
 	data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		}
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
 		if (adev->asic_type >= CHIP_STONEY)
 			WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
 		vce_v3_0_set_vce_sw_clock_gating(adev, false);
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
 	 * VCE team suggest use bit 3--bit 6 for busy status check
 	 */
 	mutex_lock(&adev->grbm_idx_mutex);
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 	if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
 	}
-	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+	WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 		if (adev->vce.harvest_config & (1 << i))
 			continue;
 
-		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
 		if (enable) {
 			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 		vce_v3_0_set_vce_sw_clock_gating(adev, enable);
 	}
 
-	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	return 0;
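A quick standalone check of what the new GET_VCE_INSTANCE() macro actually programs into GRBM_GFX_INDEX; the two macro definitions are copied from the hunks above, the rest is a throwaway test. Note that `<<` binds tighter than `|`, so the macro body needs no extra parentheses:

#include <assert.h>

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

int main(void)
{
	/* instance select sits at bit 4, the all-pipe select in bits 2:0 */
	assert(GET_VCE_INSTANCE(0) == 0x07);
	assert(GET_VCE_INSTANCE(1) == 0x17);
	return 0;
}

Writing mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) afterwards, as the hunks above do, restores the register's default value instead of leaving a single VCE instance selected.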
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5f54c9..6bb79c94cb9f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		cgs_set_clockgating_state(
 					hwmgr->device,
 					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_CG_STATE_UNGATE);
+					AMD_CG_STATE_GATE);
 		cgs_set_powergating_state(
 					hwmgr->device,
 					AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		cgs_set_clockgating_state(
 					hwmgr->device,
 					AMD_IP_BLOCK_TYPE_VCE,
-					AMD_PG_STATE_GATE);
+					AMD_PG_STATE_UNGATE);
 		cz_dpm_update_vce_dpm(hwmgr);
 		cz_enable_disable_vce_dpm(hwmgr, true);
 		return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f259a147..0fb4e8c8f5e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
 					cz_hwmgr->vce_dpm.hard_min_clk,
 					PPSMC_MSG_SetEclkHardMin));
 	} else {
-		/*EPR# 419220 -HW limitation to to */
-		cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-		smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-					PPSMC_MSG_SetEclkHardMin,
-					cz_get_eclk_level(hwmgr,
-					cz_hwmgr->vce_dpm.hard_min_clk,
-					PPSMC_MSG_SetEclkHardMin));
-
+		/*Program HardMin based on the vce_arbiter.ecclk */
+		if (hwmgr->vce_arbiter.ecclk == 0) {
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkHardMin, 0);
+			/* disable ECLK DPM 0. Otherwise VCE could hang if
+			 * switching SCLK from DPM 0 to 6/7 */
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkSoftMin, 1);
+		} else {
+			cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SetEclkHardMin,
+					cz_get_eclk_level(hwmgr,
+						cz_hwmgr->vce_dpm.hard_min_clk,
+						PPSMC_MSG_SetEclkHardMin));
+		}
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf8786c24..18eefdcbf1ba 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
 	pm_runtime_enable(dev);
 
+	pm_runtime_get_sync(dev);
 	phy_power_on(dp->phy);
 
 	analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 		goto err_disable_pm_runtime;
 	}
 
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
+
 	return 0;
 
 err_disable_pm_runtime:
+
+	phy_power_off(dp->phy);
+	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 
 	return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c161dfae..7f4cc6e172ab 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
 	  This is a KMS driver for emulated cirrus device in qemu.
 	  It is *NOT* intended for real cirrus devices. This requires
 	  the modesetting userspace X.org driver.
+
+	  Cirrus is obsolete; the hardware was designed in the '90s
+	  and can't keep up with today's needs. More background:
+	  https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+	  Better alternatives are:
+	  - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+	  - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+	  - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a35212501..e6b19bc9021a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 		return NULL;
 
 	mode->type |= DRM_MODE_TYPE_USERDEF;
+	/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+		mode->hdisplay = 1366;
+		mode->hsync_start--;
+		mode->hsync_end--;
+		drm_mode_set_name(mode);
+	}
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 	return mode;
 }
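For context on where the 1368 comes from: GTF and CVT compute horizontal timings in 8-pixel character cells, so a requested width of 1366 is rounded to a cell boundary before the timings are derived. A standalone sketch of that rounding (the rounding step is paraphrased from the kernel's GTF implementation, not copied):

#include <stdio.h>

#define CELL_GRAN 8	/* GTF/CVT horizontal character cell, in pixels */

int main(void)
{
	int xres = 1366;
	/* round to the nearest multiple of the cell size */
	int hdisplay = (xres + CELL_GRAN / 2) / CELL_GRAN * CELL_GRAN;

	printf("requested %d -> computed %d\n", xres, hdisplay); /* -> 1368 */
	return 0;
}

The hunk above then narrows hdisplay back to 1366, pulls hsync_start/hsync_end in by one pixel, and regenerates the mode name so the resulting mode matches what the user asked for on the command line.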
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f037be7..b452a7ccd84b 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -115,24 +115,27 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 /**
- * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * drm_kms_helper_poll_enable - re-enable output polling.
  * @dev: drm_device
  *
- * This function re-enables the output polling work without
- * locking the mode_config mutex.
+ * This function re-enables the output polling work, after it has been
+ * temporarily disabled using drm_kms_helper_poll_disable(), for example over
+ * suspend/resume.
  *
- * This is like drm_kms_helper_poll_enable() however it is to be
- * called from a context where the mode_config mutex is locked
- * already.
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
  */
-void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
 	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
-	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
 	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
 		return;
 
@@ -143,14 +146,24 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 	}
 
 	if (dev->mode_config.delayed_event) {
+		/*
+		 * FIXME:
+		 *
+		 * Use short (1s) delay to handle the initial delayed event.
+		 * This delay should not be needed, but Optimus/nouveau will
+		 * fail in a mysterious way if the delayed event is handled as
+		 * soon as possible like it is done in
+		 * drm_helper_probe_single_connector_modes() in case the poll
+		 * was enabled before.
+		 */
 		poll = true;
-		delay = 0;
+		delay = HZ;
 	}
 
 	if (poll)
 		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
-EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 static enum drm_connector_status
 drm_connector_detect(struct drm_connector *connector, bool force)
@@ -277,7 +290,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
 	/* Re-enable polling in case the global poll config changed. */
 	if (drm_kms_helper_poll != dev->mode_config.poll_running)
-		drm_kms_helper_poll_enable_locked(dev);
+		drm_kms_helper_poll_enable(dev);
 
 	dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -469,8 +482,12 @@ out:
 * This function disables the output polling work.
 *
 * Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or arlready
- * disabled.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
 */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
@@ -481,24 +498,6 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
 /**
- * drm_kms_helper_poll_enable - re-enable output polling.
- * @dev: drm_device
- *
- * This function re-enables the output polling work.
- *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
-{
-	mutex_lock(&dev->mode_config.mutex);
-	drm_kms_helper_poll_enable_locked(dev);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-
-/**
 * drm_kms_helper_poll_init - initialize and enable output polling
 * @dev: drm_device
 *
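The rewritten kerneldoc amounts to a simple contract: disable polling on the way into suspend, re-enable it on the way out, and keep the two strictly ordered. A hypothetical driver's PM hooks showing the intended pairing (the foo_* names and drvdata layout are invented for this sketch; only the two helpers come from this file):

static int foo_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(drm);	/* stop connector polling first */
	/* ... save state and power down the hardware ... */
	return 0;
}

static int foo_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* ... power up the hardware and restore state ... */
	drm_kms_helper_poll_enable(drm);	/* strictly after the disable */
	return 0;
}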
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96e8f08..fe0e85b41310 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		struct list_head list;
 		bool found;
 
+		/*
+		 * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+		 * drm_mm into giving out a low IOVA after address space
+		 * rollover. This needs a proper fix.
+		 */
 		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
 			size, 0, mmu->last_iova, ~0UL,
-			DRM_MM_SEARCH_DEFAULT);
+			mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
 		if (ret != -ENOSPC)
 			break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f3117fe8..75eeb831ed6a 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
| @@ -46,7 +46,8 @@ enum decon_flag_bits { | |||
| 46 | BIT_CLKS_ENABLED, | 46 | BIT_CLKS_ENABLED, |
| 47 | BIT_IRQS_ENABLED, | 47 | BIT_IRQS_ENABLED, |
| 48 | BIT_WIN_UPDATED, | 48 | BIT_WIN_UPDATED, |
| 49 | BIT_SUSPENDED | 49 | BIT_SUSPENDED, |
| 50 | BIT_REQUEST_UPDATE | ||
| 50 | }; | 51 | }; |
| 51 | 52 | ||
| 52 | struct decon_context { | 53 | struct decon_context { |
| @@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc) | |||
| 141 | m->crtc_vsync_end = m->crtc_vsync_start + 1; | 142 | m->crtc_vsync_end = m->crtc_vsync_start + 1; |
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0); | ||
| 145 | |||
| 146 | /* enable clock gate */ | ||
| 147 | val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F; | ||
| 148 | writel(val, ctx->addr + DECON_CMU); | ||
| 149 | |||
| 150 | if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) | 145 | if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) |
| 151 | decon_setup_trigger(ctx); | 146 | decon_setup_trigger(ctx); |
| 152 | 147 | ||
| @@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, | |||
| 315 | 310 | ||
| 316 | /* window enable */ | 311 | /* window enable */ |
| 317 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); | 312 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0); |
| 313 | set_bit(BIT_REQUEST_UPDATE, &ctx->flags); | ||
| 318 | } | 314 | } |
| 319 | 315 | ||
| 320 | static void decon_disable_plane(struct exynos_drm_crtc *crtc, | 316 | static void decon_disable_plane(struct exynos_drm_crtc *crtc, |
| @@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, | |||
| 327 | return; | 323 | return; |
| 328 | 324 | ||
| 329 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); | 325 | decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0); |
| 326 | set_bit(BIT_REQUEST_UPDATE, &ctx->flags); | ||
| 330 | } | 327 | } |
| 331 | 328 | ||
| 332 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | 329 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) |
| @@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | |||
| 340 | for (i = ctx->first_win; i < WINDOWS_NR; i++) | 337 | for (i = ctx->first_win; i < WINDOWS_NR; i++) |
| 341 | decon_shadow_protect_win(ctx, i, false); | 338 | decon_shadow_protect_win(ctx, i, false); |
| 342 | 339 | ||
| 343 | /* standalone update */ | 340 | if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags)) |
| 344 | decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); | 341 | decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0); |
| 345 | 342 | ||
| 346 | if (ctx->out_type & IFTYPE_I80) | 343 | if (ctx->out_type & IFTYPE_I80) |
| 347 | set_bit(BIT_WIN_UPDATED, &ctx->flags); | 344 | set_bit(BIT_WIN_UPDATED, &ctx->flags); |
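
The decon change is a common dirty-flag pattern: plane update/disable only mark a request bit, and the flush stage kicks the hardware standalone update once, and only if something actually changed. A self-contained userspace analogue (C11 atomics standing in for the kernel's set_bit()/test_and_clear_bit()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong flags;
enum { BIT_REQUEST_UPDATE = 0 };

static void plane_update(void)
{
	/* mark that the next flush must kick the hardware */
	atomic_fetch_or(&flags, 1UL << BIT_REQUEST_UPDATE);
}

static bool test_and_clear(int bit)
{
	unsigned long old = atomic_fetch_and(&flags, ~(1UL << bit));

	return old & (1UL << bit);
}

static void atomic_flush(void)
{
	if (test_and_clear(BIT_REQUEST_UPDATE))
		puts("STANDALONE_UPDATE_F written");	/* hw kicked */
	else
		puts("no window changed, update skipped");
}

int main(void)
{
	atomic_flush();		/* nothing pending: skipped */
	plane_update();
	atomic_flush();		/* request pending: kicked once */
	return 0;
}

This avoids writing STANDALONE_UPDATE_F on flushes where no window changed, which is what the removed unconditional write used to do.
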
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 0d41ebc4aea6..f7bce8603958 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c | |||
| @@ -37,13 +37,6 @@ | |||
| 37 | #include "i915_drv.h" | 37 | #include "i915_drv.h" |
| 38 | #include "gvt.h" | 38 | #include "gvt.h" |
| 39 | 39 | ||
| 40 | #define MB_TO_BYTES(mb) ((mb) << 20ULL) | ||
| 41 | #define BYTES_TO_MB(b) ((b) >> 20ULL) | ||
| 42 | |||
| 43 | #define HOST_LOW_GM_SIZE MB_TO_BYTES(128) | ||
| 44 | #define HOST_HIGH_GM_SIZE MB_TO_BYTES(384) | ||
| 45 | #define HOST_FENCE 4 | ||
| 46 | |||
| 47 | static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) | 40 | static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) |
| 48 | { | 41 | { |
| 49 | struct intel_gvt *gvt = vgpu->gvt; | 42 | struct intel_gvt *gvt = vgpu->gvt; |
| @@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | |||
| 165 | POSTING_READ(fence_reg_lo); | 158 | POSTING_READ(fence_reg_lo); |
| 166 | } | 159 | } |
| 167 | 160 | ||
| 161 | static void _clear_vgpu_fence(struct intel_vgpu *vgpu) | ||
| 162 | { | ||
| 163 | int i; | ||
| 164 | |||
| 165 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) | ||
| 166 | intel_vgpu_write_fence(vgpu, i, 0); | ||
| 167 | } | ||
| 168 | |||
| 168 | static void free_vgpu_fence(struct intel_vgpu *vgpu) | 169 | static void free_vgpu_fence(struct intel_vgpu *vgpu) |
| 169 | { | 170 | { |
| 170 | struct intel_gvt *gvt = vgpu->gvt; | 171 | struct intel_gvt *gvt = vgpu->gvt; |
| @@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) | |||
| 178 | intel_runtime_pm_get(dev_priv); | 179 | intel_runtime_pm_get(dev_priv); |
| 179 | 180 | ||
| 180 | mutex_lock(&dev_priv->drm.struct_mutex); | 181 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 182 | _clear_vgpu_fence(vgpu); | ||
| 181 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { | 183 | for (i = 0; i < vgpu_fence_sz(vgpu); i++) { |
| 182 | reg = vgpu->fence.regs[i]; | 184 | reg = vgpu->fence.regs[i]; |
| 183 | intel_vgpu_write_fence(vgpu, i, 0); | ||
| 184 | list_add_tail(®->link, | 185 | list_add_tail(®->link, |
| 185 | &dev_priv->mm.fence_list); | 186 | &dev_priv->mm.fence_list); |
| 186 | } | 187 | } |
| @@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) | |||
| 208 | continue; | 209 | continue; |
| 209 | list_del(pos); | 210 | list_del(pos); |
| 210 | vgpu->fence.regs[i] = reg; | 211 | vgpu->fence.regs[i] = reg; |
| 211 | intel_vgpu_write_fence(vgpu, i, 0); | ||
| 212 | if (++i == vgpu_fence_sz(vgpu)) | 212 | if (++i == vgpu_fence_sz(vgpu)) |
| 213 | break; | 213 | break; |
| 214 | } | 214 | } |
| 215 | if (i != vgpu_fence_sz(vgpu)) | 215 | if (i != vgpu_fence_sz(vgpu)) |
| 216 | goto out_free_fence; | 216 | goto out_free_fence; |
| 217 | 217 | ||
| 218 | _clear_vgpu_fence(vgpu); | ||
| 219 | |||
| 218 | mutex_unlock(&dev_priv->drm.struct_mutex); | 220 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 219 | intel_runtime_pm_put(dev_priv); | 221 | intel_runtime_pm_put(dev_priv); |
| 220 | return 0; | 222 | return 0; |
| @@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu) | |||
| 314 | } | 316 | } |
| 315 | 317 | ||
| 316 | /** | 318 | /** |
| 319 | * intel_vgpu_reset_resource - reset resource state owned by a vGPU | ||
| 320 | * @vgpu: a vGPU | ||
| 321 | * | ||
| 322 | * This function is used to reset resource state owned by a vGPU. | ||
| 323 | * | ||
| 324 | */ | ||
| 325 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) | ||
| 326 | { | ||
| 327 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
| 328 | |||
| 329 | intel_runtime_pm_get(dev_priv); | ||
| 330 | _clear_vgpu_fence(vgpu); | ||
| 331 | intel_runtime_pm_put(dev_priv); | ||
| 332 | } | ||
| 333 | |||
| 334 | /** | ||
| 317 | * intel_alloc_vgpu_resource - allocate HW resource for a vGPU | 335 | * intel_alloc_vgpu_resource - allocate HW resource for a vGPU |
| 318 | * @vgpu: vGPU | 336 | * @vgpu: vGPU |
| 319 | * @param: vGPU creation params | 337 | * @param: vGPU creation params |
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 711c31c8d8b4..4a6a2ed65732 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c | |||
| @@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 282 | } | 282 | } |
| 283 | return 0; | 283 | return 0; |
| 284 | } | 284 | } |
| 285 | |||
| 286 | /** | ||
| 287 | * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU | ||
| 288 | * | ||
| 289 | * @vgpu: a vGPU | ||
| 290 | * @primary: is the vGPU presented as primary | ||
| 291 | * | ||
| 292 | */ | ||
| 293 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | ||
| 294 | bool primary) | ||
| 295 | { | ||
| 296 | struct intel_gvt *gvt = vgpu->gvt; | ||
| 297 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
| 298 | u16 *gmch_ctl; | ||
| 299 | int i; | ||
| 300 | |||
| 301 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, | ||
| 302 | info->cfg_space_size); | ||
| 303 | |||
| 304 | if (!primary) { | ||
| 305 | vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = | ||
| 306 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
| 307 | vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = | ||
| 308 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
| 309 | } | ||
| 310 | |||
| 311 | /* Show the guest that there isn't any stolen memory. */ | ||
| 312 | gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); | ||
| 313 | *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); | ||
| 314 | |||
| 315 | intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, | ||
| 316 | gvt_aperture_pa_base(gvt), true); | ||
| 317 | |||
| 318 | vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO | ||
| 319 | | PCI_COMMAND_MEMORY | ||
| 320 | | PCI_COMMAND_MASTER); | ||
| 321 | /* | ||
| 322 | * Clear the upper 32 bits of the BARs and let the guest assign new values | ||
| 323 | */ | ||
| 324 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); | ||
| 325 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); | ||
| 326 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); | ||
| 327 | |||
| 328 | for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { | ||
| 329 | vgpu->cfg_space.bar[i].size = pci_resource_len( | ||
| 330 | gvt->dev_priv->drm.pdev, i * 2); | ||
| 331 | vgpu->cfg_space.bar[i].tracked = false; | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 335 | /** | ||
| 336 | * intel_vgpu_reset_cfg_space - reset vGPU configuration space | ||
| 337 | * | ||
| 338 | * @vgpu: a vGPU | ||
| 339 | * | ||
| 340 | */ | ||
| 341 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) | ||
| 342 | { | ||
| 343 | u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; | ||
| 344 | bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != | ||
| 345 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
| 346 | |||
| 347 | if (cmd & PCI_COMMAND_MEMORY) { | ||
| 348 | trap_gttmmio(vgpu, false); | ||
| 349 | map_aperture(vgpu, false); | ||
| 350 | } | ||
| 351 | |||
| 352 | /* | ||
| 353 | * Currently we only do such a reset when the vGPU is not | ||
| 354 | * owned by any VM, so we simply restore the entire cfg | ||
| 355 | * space to its default values. | ||
| 356 | */ | ||
| 357 | intel_vgpu_init_cfg_space(vgpu, primary); | ||
| 358 | } | ||
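
intel_vgpu_init_cfg_space() above is mostly byte-level surgery on a config-space snapshot: mask the graphics-mode-select field so the guest sees no stolen memory, and zero the upper dwords of 64-bit BARs so the guest reassigns them. A self-contained userspace analogue of those two steps (the offsets and masks here are illustrative stand-ins, not the real BDW_GMCH_* values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CFG_SPACE_SIZE	256
#define GMCH_CONTROL	0x50	/* illustrative offset */
#define GMS_MASK	0xff	/* illustrative mask/shift */
#define GMS_SHIFT	8
#define BAR1_UPPER	0x14	/* upper dword of a 64-bit BAR */

int main(void)
{
	uint8_t cfg[CFG_SPACE_SIZE];
	uint16_t gmch;

	memset(cfg, 0xab, sizeof(cfg));	/* stand-in firmware snapshot */

	/* hide stolen memory: clear the graphics-mode-select field */
	memcpy(&gmch, cfg + GMCH_CONTROL, sizeof(gmch));
	gmch &= ~(GMS_MASK << GMS_SHIFT);
	memcpy(cfg + GMCH_CONTROL, &gmch, sizeof(gmch));

	/* zero the upper dword so the guest assigns a fresh value */
	memset(cfg + BAR1_UPPER, 0, 4);

	printf("gmch=0x%04x, BAR upper dword cleared to %02x%02x%02x%02x\n",
	       gmch, cfg[BAR1_UPPER], cfg[BAR1_UPPER + 1],
	       cfg[BAR1_UPPER + 2], cfg[BAR1_UPPER + 3]);
	return 0;
}
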
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6c5fdf5b2ce2..47dec4acf7ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -240,15 +240,8 @@ static inline int get_pse_type(int type) | |||
| 240 | static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) | 240 | static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index) |
| 241 | { | 241 | { |
| 242 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; | 242 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; |
| 243 | u64 pte; | ||
| 244 | 243 | ||
| 245 | #ifdef readq | 244 | return readq(addr); |
| 246 | pte = readq(addr); | ||
| 247 | #else | ||
| 248 | pte = ioread32(addr); | ||
| 249 | pte |= (u64)ioread32(addr + 4) << 32; | ||
| 250 | #endif | ||
| 251 | return pte; | ||
| 252 | } | 245 | } |
| 253 | 246 | ||
| 254 | static void write_pte64(struct drm_i915_private *dev_priv, | 247 | static void write_pte64(struct drm_i915_private *dev_priv, |
| @@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv, | |||
| 256 | { | 249 | { |
| 257 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; | 250 | void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index; |
| 258 | 251 | ||
| 259 | #ifdef writeq | ||
| 260 | writeq(pte, addr); | 252 | writeq(pte, addr); |
| 261 | #else | 253 | |
| 262 | iowrite32((u32)pte, addr); | ||
| 263 | iowrite32(pte >> 32, addr + 4); | ||
| 264 | #endif | ||
| 265 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); | 254 | I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); |
| 266 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 255 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
| 267 | } | 256 | } |
| @@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm) | |||
| 1380 | info->gtt_entry_size; | 1369 | info->gtt_entry_size; |
| 1381 | mem = kzalloc(mm->has_shadow_page_table ? | 1370 | mem = kzalloc(mm->has_shadow_page_table ? |
| 1382 | mm->page_table_entry_size * 2 | 1371 | mm->page_table_entry_size * 2 |
| 1383 | : mm->page_table_entry_size, | 1372 | : mm->page_table_entry_size, GFP_KERNEL); |
| 1384 | GFP_ATOMIC); | ||
| 1385 | if (!mem) | 1373 | if (!mem) |
| 1386 | return -ENOMEM; | 1374 | return -ENOMEM; |
| 1387 | mm->virtual_page_table = mem; | 1375 | mm->virtual_page_table = mem; |
| @@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, | |||
| 1532 | struct intel_vgpu_mm *mm; | 1520 | struct intel_vgpu_mm *mm; |
| 1533 | int ret; | 1521 | int ret; |
| 1534 | 1522 | ||
| 1535 | mm = kzalloc(sizeof(*mm), GFP_ATOMIC); | 1523 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); |
| 1536 | if (!mm) { | 1524 | if (!mm) { |
| 1537 | ret = -ENOMEM; | 1525 | ret = -ENOMEM; |
| 1538 | goto fail; | 1526 | goto fail; |
| @@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1886 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1874 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
| 1887 | int page_entry_num = GTT_PAGE_SIZE >> | 1875 | int page_entry_num = GTT_PAGE_SIZE >> |
| 1888 | vgpu->gvt->device_info.gtt_entry_size_shift; | 1876 | vgpu->gvt->device_info.gtt_entry_size_shift; |
| 1889 | struct page *scratch_pt; | 1877 | void *scratch_pt; |
| 1890 | unsigned long mfn; | 1878 | unsigned long mfn; |
| 1891 | int i; | 1879 | int i; |
| 1892 | void *p; | ||
| 1893 | 1880 | ||
| 1894 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) | 1881 | if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) |
| 1895 | return -EINVAL; | 1882 | return -EINVAL; |
| 1896 | 1883 | ||
| 1897 | scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); | 1884 | scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); |
| 1898 | if (!scratch_pt) { | 1885 | if (!scratch_pt) { |
| 1899 | gvt_err("fail to allocate scratch page\n"); | 1886 | gvt_err("fail to allocate scratch page\n"); |
| 1900 | return -ENOMEM; | 1887 | return -ENOMEM; |
| 1901 | } | 1888 | } |
| 1902 | 1889 | ||
| 1903 | p = kmap_atomic(scratch_pt); | 1890 | mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); |
| 1904 | mfn = intel_gvt_hypervisor_virt_to_mfn(p); | ||
| 1905 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 1891 | if (mfn == INTEL_GVT_INVALID_ADDR) { |
| 1906 | gvt_err("fail to translate vaddr:0x%llx\n", (u64)p); | 1892 | gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); |
| 1907 | kunmap_atomic(p); | 1893 | free_page((unsigned long)scratch_pt); |
| 1908 | __free_page(scratch_pt); | ||
| 1909 | return -EFAULT; | 1894 | return -EFAULT; |
| 1910 | } | 1895 | } |
| 1911 | gtt->scratch_pt[type].page_mfn = mfn; | 1896 | gtt->scratch_pt[type].page_mfn = mfn; |
| 1912 | gtt->scratch_pt[type].page = scratch_pt; | 1897 | gtt->scratch_pt[type].page = virt_to_page(scratch_pt); |
| 1913 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", | 1898 | gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", |
| 1914 | vgpu->id, type, mfn); | 1899 | vgpu->id, type, mfn); |
| 1915 | 1900 | ||
| @@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1918 | * scratch_pt[type] indicate the scratch pt/scratch page used by the | 1903 | * scratch_pt[type] indicate the scratch pt/scratch page used by the |
| 1919 | * 'type' pt. | 1904 | * 'type' pt. |
| 1920 | * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by | 1905 | * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by |
| 1921 | * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self | 1906 | * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
| 1922 | * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. | 1907 | * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. |
| 1923 | */ | 1908 | */ |
| 1924 | if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { | 1909 | if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { |
| @@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1936 | se.val64 |= PPAT_CACHED_INDEX; | 1921 | se.val64 |= PPAT_CACHED_INDEX; |
| 1937 | 1922 | ||
| 1938 | for (i = 0; i < page_entry_num; i++) | 1923 | for (i = 0; i < page_entry_num; i++) |
| 1939 | ops->set_entry(p, &se, i, false, 0, vgpu); | 1924 | ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); |
| 1940 | } | 1925 | } |
| 1941 | 1926 | ||
| 1942 | kunmap_atomic(p); | ||
| 1943 | |||
| 1944 | return 0; | 1927 | return 0; |
| 1945 | } | 1928 | } |
| 1946 | 1929 | ||
| @@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, | |||
| 2208 | int intel_gvt_init_gtt(struct intel_gvt *gvt) | 2191 | int intel_gvt_init_gtt(struct intel_gvt *gvt) |
| 2209 | { | 2192 | { |
| 2210 | int ret; | 2193 | int ret; |
| 2211 | void *page_addr; | 2194 | void *page; |
| 2212 | 2195 | ||
| 2213 | gvt_dbg_core("init gtt\n"); | 2196 | gvt_dbg_core("init gtt\n"); |
| 2214 | 2197 | ||
| @@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) | |||
| 2221 | return -ENODEV; | 2204 | return -ENODEV; |
| 2222 | } | 2205 | } |
| 2223 | 2206 | ||
| 2224 | gvt->gtt.scratch_ggtt_page = | 2207 | page = (void *)get_zeroed_page(GFP_KERNEL); |
| 2225 | alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO); | 2208 | if (!page) { |
| 2226 | if (!gvt->gtt.scratch_ggtt_page) { | ||
| 2227 | gvt_err("fail to allocate scratch ggtt page\n"); | 2209 | gvt_err("fail to allocate scratch ggtt page\n"); |
| 2228 | return -ENOMEM; | 2210 | return -ENOMEM; |
| 2229 | } | 2211 | } |
| 2212 | gvt->gtt.scratch_ggtt_page = virt_to_page(page); | ||
| 2230 | 2213 | ||
| 2231 | page_addr = page_address(gvt->gtt.scratch_ggtt_page); | 2214 | gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); |
| 2232 | |||
| 2233 | gvt->gtt.scratch_ggtt_mfn = | ||
| 2234 | intel_gvt_hypervisor_virt_to_mfn(page_addr); | ||
| 2235 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { | 2215 | if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { |
| 2236 | gvt_err("fail to translate scratch ggtt page\n"); | 2216 | gvt_err("fail to translate scratch ggtt page\n"); |
| 2237 | __free_page(gvt->gtt.scratch_ggtt_page); | 2217 | __free_page(gvt->gtt.scratch_ggtt_page); |
| @@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) | |||
| 2297 | for (offset = 0; offset < num_entries; offset++) | 2277 | for (offset = 0; offset < num_entries; offset++) |
| 2298 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); | 2278 | ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); |
| 2299 | } | 2279 | } |
| 2280 | |||
| 2281 | /** | ||
| 2282 | * intel_vgpu_reset_gtt - reset all GTT related state | ||
| 2283 | * @vgpu: a vGPU | ||
| 2284 | * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset | ||
| 2285 | * | ||
| 2286 | * This function is called from the vfio core to reset all | ||
| 2287 | * GTT related state, including GGTT, PPGTT and scratch pages. | ||
| 2288 | * | ||
| 2289 | */ | ||
| 2290 | void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) | ||
| 2291 | { | ||
| 2292 | int i; | ||
| 2293 | |||
| 2294 | ppgtt_free_all_shadow_page(vgpu); | ||
| 2295 | if (!dmlr) | ||
| 2296 | return; | ||
| 2297 | |||
| 2298 | intel_vgpu_reset_ggtt(vgpu); | ||
| 2299 | |||
| 2300 | /* clear scratch page for security */ | ||
| 2301 | for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { | ||
| 2302 | if (vgpu->gtt.scratch_pt[i].page != NULL) | ||
| 2303 | memset(page_address(vgpu->gtt.scratch_pt[i].page), | ||
| 2304 | 0, PAGE_SIZE); | ||
| 2305 | } | ||
| 2306 | } | ||
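
Note the split in intel_vgpu_reset_gtt() above: shadow page tables are always torn down, but the GGTT reset and the scratch-page scrub only happen for a device-model-level reset, since a GT reset must leave the guest-visible GGTT contents intact.
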
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index b315ab3593ec..f88eb5e89bea 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
| @@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); | |||
| 208 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); | 208 | void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); |
| 209 | 209 | ||
| 210 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); | 210 | extern int intel_gvt_init_gtt(struct intel_gvt *gvt); |
| 211 | extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); | ||
| 211 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); | 212 | extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); |
| 212 | 213 | ||
| 213 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, | 214 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, |
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 398877c3d2fd..e6bf5c533fbe 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c | |||
| @@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) | |||
| 201 | intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); | 201 | intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt); |
| 202 | intel_gvt_clean_vgpu_types(gvt); | 202 | intel_gvt_clean_vgpu_types(gvt); |
| 203 | 203 | ||
| 204 | idr_destroy(&gvt->vgpu_idr); | ||
| 205 | |||
| 204 | kfree(dev_priv->gvt); | 206 | kfree(dev_priv->gvt); |
| 205 | dev_priv->gvt = NULL; | 207 | dev_priv->gvt = NULL; |
| 206 | } | 208 | } |
| @@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) | |||
| 237 | 239 | ||
| 238 | gvt_dbg_core("init gvt device\n"); | 240 | gvt_dbg_core("init gvt device\n"); |
| 239 | 241 | ||
| 242 | idr_init(&gvt->vgpu_idr); | ||
| 243 | |||
| 240 | mutex_init(&gvt->lock); | 244 | mutex_init(&gvt->lock); |
| 241 | gvt->dev_priv = dev_priv; | 245 | gvt->dev_priv = dev_priv; |
| 242 | 246 | ||
| @@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) | |||
| 244 | 248 | ||
| 245 | ret = intel_gvt_setup_mmio_info(gvt); | 249 | ret = intel_gvt_setup_mmio_info(gvt); |
| 246 | if (ret) | 250 | if (ret) |
| 247 | return ret; | 251 | goto out_clean_idr; |
| 248 | 252 | ||
| 249 | ret = intel_gvt_load_firmware(gvt); | 253 | ret = intel_gvt_load_firmware(gvt); |
| 250 | if (ret) | 254 | if (ret) |
| @@ -313,6 +317,8 @@ out_free_firmware: | |||
| 313 | intel_gvt_free_firmware(gvt); | 317 | intel_gvt_free_firmware(gvt); |
| 314 | out_clean_mmio_info: | 318 | out_clean_mmio_info: |
| 315 | intel_gvt_clean_mmio_info(gvt); | 319 | intel_gvt_clean_mmio_info(gvt); |
| 320 | out_clean_idr: | ||
| 321 | idr_destroy(&gvt->vgpu_idr); | ||
| 316 | kfree(gvt); | 322 | kfree(gvt); |
| 317 | return ret; | 323 | return ret; |
| 318 | } | 324 | } |
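
The gvt.c fix follows the usual kernel unwind idiom: every init step gets a matching cleanup label, and a failure jumps to the label that tears down exactly what has been set up so far, which now includes the new vgpu_idr. A compilable userspace sketch of the shape (malloc/free stand in for idr_init()/idr_destroy() and friends):

#include <stdio.h>
#include <stdlib.h>

static int init_device(void)
{
	int ret;
	void *idr = malloc(16);		/* idr_init() stand-in */
	void *mmio_info, *firmware;

	if (!idr)
		return -1;

	mmio_info = malloc(16);		/* intel_gvt_setup_mmio_info() */
	if (!mmio_info) {
		ret = -1;
		goto out_clean_idr;
	}

	firmware = malloc(16);		/* intel_gvt_load_firmware() */
	if (!firmware) {
		ret = -1;
		goto out_clean_mmio_info;
	}

	/* success: in real code these resources stay live */
	free(firmware);
	free(mmio_info);
	free(idr);
	return 0;

out_clean_mmio_info:
	free(mmio_info);
out_clean_idr:
	free(idr);			/* idr_destroy() stand-in */
	return ret;
}

int main(void)
{
	return init_device() ? EXIT_FAILURE : EXIT_SUCCESS;
}

In the real function the resources of course stay live on success; they are freed here only so the demo exits leak-clean.
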
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0af17016f33f..e227caf5859e 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
| @@ -323,6 +323,7 @@ struct intel_vgpu_creation_params { | |||
| 323 | 323 | ||
| 324 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, | 324 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, |
| 325 | struct intel_vgpu_creation_params *param); | 325 | struct intel_vgpu_creation_params *param); |
| 326 | void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); | ||
| 326 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); | 327 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); |
| 327 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | 328 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, |
| 328 | u32 fence, u64 value); | 329 | u32 fence, u64 value); |
| @@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); | |||
| 375 | struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | 376 | struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, |
| 376 | struct intel_vgpu_type *type); | 377 | struct intel_vgpu_type *type); |
| 377 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); | 378 | void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); |
| 379 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | ||
| 380 | unsigned int engine_mask); | ||
| 378 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); | 381 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); |
| 379 | 382 | ||
| 380 | 383 | ||
| @@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, | |||
| 411 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, | 414 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, |
| 412 | unsigned long *g_index); | 415 | unsigned long *g_index); |
| 413 | 416 | ||
| 417 | void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, | ||
| 418 | bool primary); | ||
| 419 | void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); | ||
| 420 | |||
| 414 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, | 421 | int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, |
| 415 | void *p_data, unsigned int bytes); | 422 | void *p_data, unsigned int bytes); |
| 416 | 423 | ||
| @@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); | |||
| 424 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); | 431 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); |
| 425 | 432 | ||
| 426 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); | 433 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); |
| 427 | int setup_vgpu_mmio(struct intel_vgpu *vgpu); | ||
| 428 | void populate_pvinfo_page(struct intel_vgpu *vgpu); | 434 | void populate_pvinfo_page(struct intel_vgpu *vgpu); |
| 429 | 435 | ||
| 430 | struct intel_gvt_ops { | 436 | struct intel_gvt_ops { |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 522809710312..ab2ea157da4c 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 93 | static int new_mmio_info(struct intel_gvt *gvt, | 93 | static int new_mmio_info(struct intel_gvt *gvt, |
| 94 | u32 offset, u32 flags, u32 size, | 94 | u32 offset, u32 flags, u32 size, |
| 95 | u32 addr_mask, u32 ro_mask, u32 device, | 95 | u32 addr_mask, u32 ro_mask, u32 device, |
| 96 | void *read, void *write) | 96 | int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int), |
| 97 | int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int)) | ||
| 97 | { | 98 | { |
| 98 | struct intel_gvt_mmio_info *info, *p; | 99 | struct intel_gvt_mmio_info *info, *p; |
| 99 | u32 start, end, i; | 100 | u32 start, end, i; |
| @@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, | |||
| 219 | default: | 220 | default: |
| 220 | /*should not hit here*/ | 221 | /*should not hit here*/ |
| 221 | gvt_err("invalid forcewake offset 0x%x\n", offset); | 222 | gvt_err("invalid forcewake offset 0x%x\n", offset); |
| 222 | return 1; | 223 | return -EINVAL; |
| 223 | } | 224 | } |
| 224 | } else { | 225 | } else { |
| 225 | ack_reg_offset = FORCEWAKE_ACK_HSW_REG; | 226 | ack_reg_offset = FORCEWAKE_ACK_HSW_REG; |
| @@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, | |||
| 230 | return 0; | 231 | return 0; |
| 231 | } | 232 | } |
| 232 | 233 | ||
| 233 | static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, | ||
| 234 | void *p_data, unsigned int bytes, unsigned long bitmap) | ||
| 235 | { | ||
| 236 | struct intel_gvt_workload_scheduler *scheduler = | ||
| 237 | &vgpu->gvt->scheduler; | ||
| 238 | |||
| 239 | vgpu->resetting = true; | ||
| 240 | |||
| 241 | intel_vgpu_stop_schedule(vgpu); | ||
| 242 | /* | ||
| 243 | * The current_vgpu will set to NULL after stopping the | ||
| 244 | * scheduler when the reset is triggered by current vgpu. | ||
| 245 | */ | ||
| 246 | if (scheduler->current_vgpu == NULL) { | ||
| 247 | mutex_unlock(&vgpu->gvt->lock); | ||
| 248 | intel_gvt_wait_vgpu_idle(vgpu); | ||
| 249 | mutex_lock(&vgpu->gvt->lock); | ||
| 250 | } | ||
| 251 | |||
| 252 | intel_vgpu_reset_execlist(vgpu, bitmap); | ||
| 253 | |||
| 254 | /* full GPU reset */ | ||
| 255 | if (bitmap == 0xff) { | ||
| 256 | mutex_unlock(&vgpu->gvt->lock); | ||
| 257 | intel_vgpu_clean_gtt(vgpu); | ||
| 258 | mutex_lock(&vgpu->gvt->lock); | ||
| 259 | setup_vgpu_mmio(vgpu); | ||
| 260 | populate_pvinfo_page(vgpu); | ||
| 261 | intel_vgpu_init_gtt(vgpu); | ||
| 262 | } | ||
| 263 | |||
| 264 | vgpu->resetting = false; | ||
| 265 | |||
| 266 | return 0; | ||
| 267 | } | ||
| 268 | |||
| 269 | static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 234 | static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
| 270 | void *p_data, unsigned int bytes) | 235 | void *p_data, unsigned int bytes) |
| 271 | { | 236 | { |
| 237 | unsigned int engine_mask = 0; | ||
| 272 | u32 data; | 238 | u32 data; |
| 273 | u64 bitmap = 0; | ||
| 274 | 239 | ||
| 275 | write_vreg(vgpu, offset, p_data, bytes); | 240 | write_vreg(vgpu, offset, p_data, bytes); |
| 276 | data = vgpu_vreg(vgpu, offset); | 241 | data = vgpu_vreg(vgpu, offset); |
| 277 | 242 | ||
| 278 | if (data & GEN6_GRDOM_FULL) { | 243 | if (data & GEN6_GRDOM_FULL) { |
| 279 | gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); | 244 | gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); |
| 280 | bitmap = 0xff; | 245 | engine_mask = ALL_ENGINES; |
| 281 | } | 246 | } else { |
| 282 | if (data & GEN6_GRDOM_RENDER) { | 247 | if (data & GEN6_GRDOM_RENDER) { |
| 283 | gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); | 248 | gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); |
| 284 | bitmap |= (1 << RCS); | 249 | engine_mask |= (1 << RCS); |
| 285 | } | 250 | } |
| 286 | if (data & GEN6_GRDOM_MEDIA) { | 251 | if (data & GEN6_GRDOM_MEDIA) { |
| 287 | gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); | 252 | gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); |
| 288 | bitmap |= (1 << VCS); | 253 | engine_mask |= (1 << VCS); |
| 289 | } | 254 | } |
| 290 | if (data & GEN6_GRDOM_BLT) { | 255 | if (data & GEN6_GRDOM_BLT) { |
| 291 | gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); | 256 | gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); |
| 292 | bitmap |= (1 << BCS); | 257 | engine_mask |= (1 << BCS); |
| 293 | } | 258 | } |
| 294 | if (data & GEN6_GRDOM_VECS) { | 259 | if (data & GEN6_GRDOM_VECS) { |
| 295 | gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); | 260 | gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); |
| 296 | bitmap |= (1 << VECS); | 261 | engine_mask |= (1 << VECS); |
| 297 | } | 262 | } |
| 298 | if (data & GEN8_GRDOM_MEDIA2) { | 263 | if (data & GEN8_GRDOM_MEDIA2) { |
| 299 | gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); | 264 | gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); |
| 300 | if (HAS_BSD2(vgpu->gvt->dev_priv)) | 265 | if (HAS_BSD2(vgpu->gvt->dev_priv)) |
| 301 | bitmap |= (1 << VCS2); | 266 | engine_mask |= (1 << VCS2); |
| 267 | } | ||
| 302 | } | 268 | } |
| 303 | return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); | 269 | |
| 270 | intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); | ||
| 271 | |||
| 272 | return 0; | ||
| 304 | } | 273 | } |
| 305 | 274 | ||
| 306 | static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | 275 | static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, |
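
gdrst_mmio_write() now only decodes the written GDRST value into an engine mask and hands it to the common intel_gvt_reset_vgpu_locked() path. A self-contained sketch of that decode (the bit values and engine ids are illustrative stand-ins, not the real GEN6_GRDOM_* definitions):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for GEN6_GRDOM_* bits and engine ids. */
#define GRDOM_FULL	(1u << 0)
#define GRDOM_RENDER	(1u << 1)
#define GRDOM_MEDIA	(1u << 2)
#define GRDOM_BLT	(1u << 3)

enum { RCS, VCS, BCS, NUM_ENGINES };
#define ALL_ENGINES	((1u << NUM_ENGINES) - 1)

static unsigned int decode_gdrst(uint32_t data)
{
	unsigned int engine_mask = 0;

	if (data & GRDOM_FULL)
		return ALL_ENGINES;	/* full GPU reset */
	if (data & GRDOM_RENDER)
		engine_mask |= 1u << RCS;
	if (data & GRDOM_MEDIA)
		engine_mask |= 1u << VCS;
	if (data & GRDOM_BLT)
		engine_mask |= 1u << BCS;
	return engine_mask;
}

int main(void)
{
	printf("render+blt -> %#x\n", decode_gdrst(GRDOM_RENDER | GRDOM_BLT));
	printf("full       -> %#x\n", decode_gdrst(GRDOM_FULL));
	return 0;
}
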
| @@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 974 | return 0; | 943 | return 0; |
| 975 | } | 944 | } |
| 976 | 945 | ||
| 977 | static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 946 | static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
| 978 | void *p_data, unsigned int bytes) | 947 | void *p_data, unsigned int bytes) |
| 979 | { | 948 | { |
| 980 | u32 data; | 949 | u32 data; |
| @@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 1366 | static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, | 1335 | static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, |
| 1367 | unsigned int offset, void *p_data, unsigned int bytes) | 1336 | unsigned int offset, void *p_data, unsigned int bytes) |
| 1368 | { | 1337 | { |
| 1369 | int rc = 0; | ||
| 1370 | unsigned int id = 0; | 1338 | unsigned int id = 0; |
| 1371 | 1339 | ||
| 1372 | write_vreg(vgpu, offset, p_data, bytes); | 1340 | write_vreg(vgpu, offset, p_data, bytes); |
| @@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, | |||
| 1389 | id = VECS; | 1357 | id = VECS; |
| 1390 | break; | 1358 | break; |
| 1391 | default: | 1359 | default: |
| 1392 | rc = -EINVAL; | 1360 | return -EINVAL; |
| 1393 | break; | ||
| 1394 | } | 1361 | } |
| 1395 | set_bit(id, (void *)vgpu->tlb_handle_pending); | 1362 | set_bit(id, (void *)vgpu->tlb_handle_pending); |
| 1396 | 1363 | ||
| 1397 | return rc; | 1364 | return 0; |
| 1398 | } | 1365 | } |
| 1399 | 1366 | ||
| 1400 | static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | 1367 | static int ring_reset_ctl_write(struct intel_vgpu *vgpu, |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index faaae07ae487..0c9234a87a20 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
| 398 | struct intel_vgpu_type *type; | 398 | struct intel_vgpu_type *type; |
| 399 | struct device *pdev; | 399 | struct device *pdev; |
| 400 | void *gvt; | 400 | void *gvt; |
| 401 | int ret; | ||
| 401 | 402 | ||
| 402 | pdev = mdev_parent_dev(mdev); | 403 | pdev = mdev_parent_dev(mdev); |
| 403 | gvt = kdev_to_i915(pdev)->gvt; | 404 | gvt = kdev_to_i915(pdev)->gvt; |
| @@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
| 406 | if (!type) { | 407 | if (!type) { |
| 407 | gvt_err("failed to find type %s to create\n", | 408 | gvt_err("failed to find type %s to create\n", |
| 408 | kobject_name(kobj)); | 409 | kobject_name(kobj)); |
| 409 | return -EINVAL; | 410 | ret = -EINVAL; |
| 411 | goto out; | ||
| 410 | } | 412 | } |
| 411 | 413 | ||
| 412 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); | 414 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); |
| 413 | if (IS_ERR_OR_NULL(vgpu)) { | 415 | if (IS_ERR_OR_NULL(vgpu)) { |
| 414 | gvt_err("create intel vgpu failed\n"); | 416 | ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); |
| 415 | return -EINVAL; | 417 | gvt_err("failed to create intel vgpu: %d\n", ret); |
| 418 | goto out; | ||
| 416 | } | 419 | } |
| 417 | 420 | ||
| 418 | INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); | 421 | INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work); |
| @@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
| 422 | 425 | ||
| 423 | gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", | 426 | gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n", |
| 424 | dev_name(mdev_dev(mdev))); | 427 | dev_name(mdev_dev(mdev))); |
| 425 | return 0; | 428 | ret = 0; |
| 429 | |||
| 430 | out: | ||
| 431 | return ret; | ||
| 426 | } | 432 | } |
| 427 | 433 | ||
| 428 | static int intel_vgpu_remove(struct mdev_device *mdev) | 434 | static int intel_vgpu_remove(struct mdev_device *mdev) |
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 09c9450a1946..4df078bc5d04 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
| @@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 125 | if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) | 125 | if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1))) |
| 126 | goto err; | 126 | goto err; |
| 127 | 127 | ||
| 128 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); | ||
| 129 | if (!mmio && !vgpu->mmio.disable_warn_untrack) { | ||
| 130 | gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n", | ||
| 131 | vgpu->id, offset, bytes, *(u32 *)p_data); | ||
| 132 | |||
| 133 | if (offset == 0x206c) { | ||
| 134 | gvt_err("------------------------------------------\n"); | ||
| 135 | gvt_err("vgpu%d: likely triggers a gfx reset\n", | ||
| 136 | vgpu->id); | ||
| 137 | gvt_err("------------------------------------------\n"); | ||
| 138 | vgpu->mmio.disable_warn_untrack = true; | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 142 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { | 128 | if (!intel_gvt_mmio_is_unalign(gvt, offset)) { |
| 143 | if (WARN_ON(!IS_ALIGNED(offset, bytes))) | 129 | if (WARN_ON(!IS_ALIGNED(offset, bytes))) |
| 144 | goto err; | 130 | goto err; |
| 145 | } | 131 | } |
| 146 | 132 | ||
| 133 | mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); | ||
| 147 | if (mmio) { | 134 | if (mmio) { |
| 148 | if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { | 135 | if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) { |
| 149 | if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) | 136 | if (WARN_ON(offset + bytes > mmio->offset + mmio->size)) |
| @@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 152 | goto err; | 139 | goto err; |
| 153 | } | 140 | } |
| 154 | ret = mmio->read(vgpu, offset, p_data, bytes); | 141 | ret = mmio->read(vgpu, offset, p_data, bytes); |
| 155 | } else | 142 | } else { |
| 156 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); | 143 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); |
| 157 | 144 | ||
| 145 | if (!vgpu->mmio.disable_warn_untrack) { | ||
| 146 | gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", | ||
| 147 | vgpu->id, offset, bytes, *(u32 *)p_data); | ||
| 148 | |||
| 149 | if (offset == 0x206c) { | ||
| 150 | gvt_err("------------------------------------------\n"); | ||
| 151 | gvt_err("vgpu%d: likely triggers a gfx reset\n", | ||
| 152 | vgpu->id); | ||
| 153 | gvt_err("------------------------------------------\n"); | ||
| 154 | vgpu->mmio.disable_warn_untrack = true; | ||
| 155 | } | ||
| 156 | } | ||
| 157 | } | ||
| 158 | |||
| 158 | if (ret) | 159 | if (ret) |
| 159 | goto err; | 160 | goto err; |
| 160 | 161 | ||
| @@ -302,3 +303,56 @@ err: | |||
| 302 | mutex_unlock(&gvt->lock); | 303 | mutex_unlock(&gvt->lock); |
| 303 | return ret; | 304 | return ret; |
| 304 | } | 305 | } |
| 306 | |||
| 307 | |||
| 308 | /** | ||
| 309 | * intel_vgpu_reset_mmio - reset virtual MMIO space | ||
| 310 | * @vgpu: a vGPU | ||
| 311 | * | ||
| 312 | */ | ||
| 313 | void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) | ||
| 314 | { | ||
| 315 | struct intel_gvt *gvt = vgpu->gvt; | ||
| 316 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
| 317 | |||
| 318 | memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); | ||
| 319 | memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); | ||
| 320 | |||
| 321 | vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; | ||
| 322 | |||
| 323 | /* set bits 0:2 (Core C-State) to C0 */ | ||
| 324 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; | ||
| 325 | } | ||
| 326 | |||
| 327 | /** | ||
| 328 | * intel_vgpu_init_mmio - init MMIO space | ||
| 329 | * @vgpu: a vGPU | ||
| 330 | * | ||
| 331 | * Returns: | ||
| 332 | * Zero on success, negative error code on failure | ||
| 333 | */ | ||
| 334 | int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) | ||
| 335 | { | ||
| 336 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; | ||
| 337 | |||
| 338 | vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); | ||
| 339 | if (!vgpu->mmio.vreg) | ||
| 340 | return -ENOMEM; | ||
| 341 | |||
| 342 | vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; | ||
| 343 | |||
| 344 | intel_vgpu_reset_mmio(vgpu); | ||
| 345 | |||
| 346 | return 0; | ||
| 347 | } | ||
| 348 | |||
| 349 | /** | ||
| 350 | * intel_vgpu_clean_mmio - clean MMIO space | ||
| 351 | * @vgpu: a vGPU | ||
| 352 | * | ||
| 353 | */ | ||
| 354 | void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) | ||
| 355 | { | ||
| 356 | vfree(vgpu->mmio.vreg); | ||
| 357 | vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; | ||
| 358 | } | ||
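
intel_vgpu_init_mmio() carves both register files out of one allocation: vreg at the base and sreg at base + mmio_size, so the single vfree() in intel_vgpu_clean_mmio() releases both. A small userspace analogue of that layout:

#include <stdio.h>
#include <stdlib.h>

#define MMIO_SIZE	(2 * 1024 * 1024)	/* info->mmio_size stand-in */

int main(void)
{
	/* one zeroed allocation, two views, like vzalloc(size * 2) */
	unsigned char *vreg = calloc(2, MMIO_SIZE);
	unsigned char *sreg;

	if (!vreg)
		return EXIT_FAILURE;
	sreg = vreg + MMIO_SIZE;

	vreg[0] = 0xaa;				/* virtual register file */
	sreg[0] = 0x55;				/* shadow register file */
	printf("vreg[0]=%#x sreg[0]=%#x\n", vreg[0], sreg[0]);

	free(vreg);	/* one free releases both views */
	return EXIT_SUCCESS;
}
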
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 87d5b5e366a3..3bc620f56f35 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h | |||
| @@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, | |||
| 86 | *offset; \ | 86 | *offset; \ |
| 87 | }) | 87 | }) |
| 88 | 88 | ||
| 89 | int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); | ||
| 90 | void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); | ||
| 91 | void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); | ||
| 92 | |||
| 89 | int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); | 93 | int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); |
| 90 | 94 | ||
| 91 | int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, | 95 | int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa, |
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 81cd921770c6..d9fb41ab7119 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
| @@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) | |||
| 36 | vgpu->id)) | 36 | vgpu->id)) |
| 37 | return -EINVAL; | 37 | return -EINVAL; |
| 38 | 38 | ||
| 39 | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC | | 39 | vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | |
| 40 | GFP_DMA32 | __GFP_ZERO, | 40 | __GFP_ZERO, |
| 41 | INTEL_GVT_OPREGION_PORDER); | 41 | get_order(INTEL_GVT_OPREGION_SIZE)); |
| 42 | 42 | ||
| 43 | if (!vgpu_opregion(vgpu)->va) | 43 | if (!vgpu_opregion(vgpu)->va) |
| 44 | return -ENOMEM; | 44 | return -ENOMEM; |
| @@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu) | |||
| 97 | if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { | 97 | if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) { |
| 98 | map_vgpu_opregion(vgpu, false); | 98 | map_vgpu_opregion(vgpu, false); |
| 99 | free_pages((unsigned long)vgpu_opregion(vgpu)->va, | 99 | free_pages((unsigned long)vgpu_opregion(vgpu)->va, |
| 100 | INTEL_GVT_OPREGION_PORDER); | 100 | get_order(INTEL_GVT_OPREGION_SIZE)); |
| 101 | 101 | ||
| 102 | vgpu_opregion(vgpu)->va = NULL; | 102 | vgpu_opregion(vgpu)->va = NULL; |
| 103 | } | 103 | } |
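
Replacing the hard-coded INTEL_GVT_OPREGION_PORDER with get_order(INTEL_GVT_OPREGION_SIZE) keeps the allocation and the free in sync if the size ever changes. get_order() returns the smallest n such that PAGE_SIZE << n covers the size; a compilable userspace re-implementation to illustrate (assuming 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define OPREGION_PAGES	2
#define OPREGION_SIZE	(OPREGION_PAGES * PAGE_SIZE)

/* smallest order n such that (PAGE_SIZE << n) >= size */
static int get_order_demo(unsigned long size)
{
	unsigned long span = PAGE_SIZE;
	int order = 0;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("get_order(%lu) = %d\n", OPREGION_SIZE,
	       get_order_demo(OPREGION_SIZE));
	return 0;
}

For the two-page opregion this yields order 1, matching the old constant.
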
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 0dfe789d8f02..fbd023a16f18 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h | |||
| @@ -50,8 +50,7 @@ | |||
| 50 | #define INTEL_GVT_OPREGION_PARM 0x204 | 50 | #define INTEL_GVT_OPREGION_PARM 0x204 |
| 51 | 51 | ||
| 52 | #define INTEL_GVT_OPREGION_PAGES 2 | 52 | #define INTEL_GVT_OPREGION_PAGES 2 |
| 53 | #define INTEL_GVT_OPREGION_PORDER 1 | 53 | #define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE) |
| 54 | #define INTEL_GVT_OPREGION_SIZE (2 * 4096) | ||
| 55 | 54 | ||
| 56 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) | 55 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) |
| 57 | 56 | ||
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 4db242250235..e91885dffeff 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 350 | { | 350 | { |
| 351 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 351 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 352 | struct intel_vgpu_workload *workload; | 352 | struct intel_vgpu_workload *workload; |
| 353 | struct intel_vgpu *vgpu; | ||
| 353 | int event; | 354 | int event; |
| 354 | 355 | ||
| 355 | mutex_lock(&gvt->lock); | 356 | mutex_lock(&gvt->lock); |
| 356 | 357 | ||
| 357 | workload = scheduler->current_workload[ring_id]; | 358 | workload = scheduler->current_workload[ring_id]; |
| 359 | vgpu = workload->vgpu; | ||
| 358 | 360 | ||
| 359 | if (!workload->status && !workload->vgpu->resetting) { | 361 | if (!workload->status && !vgpu->resetting) { |
| 360 | wait_event(workload->shadow_ctx_status_wq, | 362 | wait_event(workload->shadow_ctx_status_wq, |
| 361 | !atomic_read(&workload->shadow_ctx_active)); | 363 | !atomic_read(&workload->shadow_ctx_active)); |
| 362 | 364 | ||
| @@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 364 | 366 | ||
| 365 | for_each_set_bit(event, workload->pending_events, | 367 | for_each_set_bit(event, workload->pending_events, |
| 366 | INTEL_GVT_EVENT_MAX) | 368 | INTEL_GVT_EVENT_MAX) |
| 367 | intel_vgpu_trigger_virtual_event(workload->vgpu, | 369 | intel_vgpu_trigger_virtual_event(vgpu, event); |
| 368 | event); | ||
| 369 | } | 370 | } |
| 370 | 371 | ||
| 371 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", | 372 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", |
| @@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 373 | 374 | ||
| 374 | scheduler->current_workload[ring_id] = NULL; | 375 | scheduler->current_workload[ring_id] = NULL; |
| 375 | 376 | ||
| 376 | atomic_dec(&workload->vgpu->running_workload_num); | ||
| 377 | |||
| 378 | list_del_init(&workload->list); | 377 | list_del_init(&workload->list); |
| 379 | workload->complete(workload); | 378 | workload->complete(workload); |
| 380 | 379 | ||
| 380 | atomic_dec(&vgpu->running_workload_num); | ||
| 381 | wake_up(&scheduler->workload_complete_wq); | 381 | wake_up(&scheduler->workload_complete_wq); |
| 382 | mutex_unlock(&gvt->lock); | 382 | mutex_unlock(&gvt->lock); |
| 383 | } | 383 | } |
| @@ -459,11 +459,11 @@ complete: | |||
| 459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", | 459 | gvt_dbg_sched("will complete workload %p\n, status: %d\n", |
| 460 | workload, workload->status); | 460 | workload, workload->status); |
| 461 | 461 | ||
| 462 | complete_current_workload(gvt, ring_id); | ||
| 463 | |||
| 464 | if (workload->req) | 462 | if (workload->req) |
| 465 | i915_gem_request_put(fetch_and_zero(&workload->req)); | 463 | i915_gem_request_put(fetch_and_zero(&workload->req)); |
| 466 | 464 | ||
| 465 | complete_current_workload(gvt, ring_id); | ||
| 466 | |||
| 467 | if (need_force_wake) | 467 | if (need_force_wake) |
| 468 | intel_uncore_forcewake_put(gvt->dev_priv, | 468 | intel_uncore_forcewake_put(gvt->dev_priv, |
| 469 | FORCEWAKE_ALL); | 469 | FORCEWAKE_ALL); |
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 536d2b9d5777..7295bc8e12fb 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c | |||
| @@ -35,79 +35,6 @@ | |||
| 35 | #include "gvt.h" | 35 | #include "gvt.h" |
| 36 | #include "i915_pvinfo.h" | 36 | #include "i915_pvinfo.h" |
| 37 | 37 | ||
| 38 | static void clean_vgpu_mmio(struct intel_vgpu *vgpu) | ||
| 39 | { | ||
| 40 | vfree(vgpu->mmio.vreg); | ||
| 41 | vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; | ||
| 42 | } | ||
| 43 | |||
| 44 | int setup_vgpu_mmio(struct intel_vgpu *vgpu) | ||
| 45 | { | ||
| 46 | struct intel_gvt *gvt = vgpu->gvt; | ||
| 47 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
| 48 | |||
| 49 | if (vgpu->mmio.vreg) | ||
| 50 | memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); | ||
| 51 | else { | ||
| 52 | vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); | ||
| 53 | if (!vgpu->mmio.vreg) | ||
| 54 | return -ENOMEM; | ||
| 55 | } | ||
| 56 | |||
| 57 | vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; | ||
| 58 | |||
| 59 | memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); | ||
| 60 | memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); | ||
| 61 | |||
| 62 | vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; | ||
| 63 | |||
| 64 | /* set the bit 0:2(Core C-State ) to C0 */ | ||
| 65 | vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | |||
| 69 | static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, | ||
| 70 | struct intel_vgpu_creation_params *param) | ||
| 71 | { | ||
| 72 | struct intel_gvt *gvt = vgpu->gvt; | ||
| 73 | const struct intel_gvt_device_info *info = &gvt->device_info; | ||
| 74 | u16 *gmch_ctl; | ||
| 75 | int i; | ||
| 76 | |||
| 77 | memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, | ||
| 78 | info->cfg_space_size); | ||
| 79 | |||
| 80 | if (!param->primary) { | ||
| 81 | vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = | ||
| 82 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
| 83 | vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = | ||
| 84 | INTEL_GVT_PCI_CLASS_VGA_OTHER; | ||
| 85 | } | ||
| 86 | |||
| 87 | /* Show guest that there isn't any stolen memory.*/ | ||
| 88 | gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); | ||
| 89 | *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); | ||
| 90 | |||
| 91 | intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, | ||
| 92 | gvt_aperture_pa_base(gvt), true); | ||
| 93 | |||
| 94 | vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO | ||
| 95 | | PCI_COMMAND_MEMORY | ||
| 96 | | PCI_COMMAND_MASTER); | ||
| 97 | /* | ||
| 98 | * Clear the bar upper 32bit and let guest to assign the new value | ||
| 99 | */ | ||
| 100 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); | ||
| 101 | memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); | ||
| 102 | memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); | ||
| 103 | |||
| 104 | for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { | ||
| 105 | vgpu->cfg_space.bar[i].size = pci_resource_len( | ||
| 106 | gvt->dev_priv->drm.pdev, i * 2); | ||
| 107 | vgpu->cfg_space.bar[i].tracked = false; | ||
| 108 | } | ||
| 109 | } | ||
| 110 | |||
| 111 | void populate_pvinfo_page(struct intel_vgpu *vgpu) | 38 | void populate_pvinfo_page(struct intel_vgpu *vgpu) |
| 112 | { | 39 | { |
| 113 | /* setup the ballooning information */ | 40 | /* setup the ballooning information */ |
| @@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) | |||
| 177 | if (low_avail / min_low == 0) | 104 | if (low_avail / min_low == 0) |
| 178 | break; | 105 | break; |
| 179 | gvt->types[i].low_gm_size = min_low; | 106 | gvt->types[i].low_gm_size = min_low; |
| 180 | gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size; | 107 | gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); |
| 181 | gvt->types[i].fence = 4; | 108 | gvt->types[i].fence = 4; |
| 182 | gvt->types[i].max_instance = low_avail / min_low; | 109 | gvt->types[i].max_instance = low_avail / min_low; |
| 183 | gvt->types[i].avail_instance = gvt->types[i].max_instance; | 110 | gvt->types[i].avail_instance = gvt->types[i].max_instance; |
| @@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) | |||
| 217 | */ | 144 | */ |
| 218 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - | 145 | low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - |
| 219 | gvt->gm.vgpu_allocated_low_gm_size; | 146 | gvt->gm.vgpu_allocated_low_gm_size; |
| 220 | high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE - | 147 | high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - |
| 221 | gvt->gm.vgpu_allocated_high_gm_size; | 148 | gvt->gm.vgpu_allocated_high_gm_size; |
| 222 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - | 149 | fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - |
| 223 | gvt->fence.vgpu_allocated_fence_num; | 150 | gvt->fence.vgpu_allocated_fence_num; |
| @@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) | |||
| 268 | intel_vgpu_clean_gtt(vgpu); | 195 | intel_vgpu_clean_gtt(vgpu); |
| 269 | intel_gvt_hypervisor_detach_vgpu(vgpu); | 196 | intel_gvt_hypervisor_detach_vgpu(vgpu); |
| 270 | intel_vgpu_free_resource(vgpu); | 197 | intel_vgpu_free_resource(vgpu); |
| 271 | clean_vgpu_mmio(vgpu); | 198 | intel_vgpu_clean_mmio(vgpu); |
| 272 | vfree(vgpu); | 199 | vfree(vgpu); |
| 273 | 200 | ||
| 274 | intel_gvt_update_vgpu_types(gvt); | 201 | intel_gvt_update_vgpu_types(gvt); |
| @@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
| 300 | vgpu->gvt = gvt; | 227 | vgpu->gvt = gvt; |
| 301 | bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); | 228 | bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); |
| 302 | 229 | ||
| 303 | setup_vgpu_cfg_space(vgpu, param); | 230 | intel_vgpu_init_cfg_space(vgpu, param->primary); |
| 304 | 231 | ||
| 305 | ret = setup_vgpu_mmio(vgpu); | 232 | ret = intel_vgpu_init_mmio(vgpu); |
| 306 | if (ret) | 233 | if (ret) |
| 307 | goto out_free_vgpu; | 234 | goto out_clean_idr; |
| 308 | 235 | ||
| 309 | ret = intel_vgpu_alloc_resource(vgpu, param); | 236 | ret = intel_vgpu_alloc_resource(vgpu, param); |
| 310 | if (ret) | 237 | if (ret) |
| @@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu: | |||
| 354 | out_clean_vgpu_resource: | 281 | out_clean_vgpu_resource: |
| 355 | intel_vgpu_free_resource(vgpu); | 282 | intel_vgpu_free_resource(vgpu); |
| 356 | out_clean_vgpu_mmio: | 283 | out_clean_vgpu_mmio: |
| 357 | clean_vgpu_mmio(vgpu); | 284 | intel_vgpu_clean_mmio(vgpu); |
| 285 | out_clean_idr: | ||
| 286 | idr_remove(&gvt->vgpu_idr, vgpu->id); | ||
| 358 | out_free_vgpu: | 287 | out_free_vgpu: |
| 359 | vfree(vgpu); | 288 | vfree(vgpu); |
| 360 | mutex_unlock(&gvt->lock); | 289 | mutex_unlock(&gvt->lock); |
| @@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
| 398 | } | 327 | } |
| 399 | 328 | ||
| 400 | /** | 329 | /** |
| 401 | * intel_gvt_reset_vgpu - reset a virtual GPU | 330 | * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset |
| 331 | * @vgpu: virtual GPU | ||
| 332 | * @dmlr: vGPU Device Model Level Reset or GT Reset | ||
| 333 | * @engine_mask: engines to reset for GT reset | ||
| 334 | * | ||
| 335 | * This function is called when a user wants to reset a virtual GPU through | ||
| 336 | * a device model reset or a GT reset. The caller should hold the gvt lock. | ||
| 337 | * | ||
| 338 | * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset to bring | ||
| 339 | * the whole vGPU back to the default state it had when created. This function | ||
| 340 | * is required for both functional and security reasons. The ultimate goal | ||
| 341 | * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines. | ||
| 342 | * When we assign a vGPU to a virtual machine we must issue such a reset first. | ||
| 343 | * | ||
| 344 | * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines | ||
| 345 | * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec. | ||
| 346 | * Unlike the FLR, GT reset only reset particular resource of a vGPU per | ||
| 347 | * the reset request. Guest driver can issue a GT reset by programming the | ||
| 348 | * virtual GDRST register to reset specific virtual GPU engine or all | ||
| 349 | * engines. | ||
| 350 | * | ||
| 351 | * The parameter dmlr identifies whether we will do a DMLR or a GT reset. | ||
| 352 | * The parameter engine_mask specifies the engines that need to be | ||
| 353 | * reset. If the value ALL_ENGINES is given for engine_mask, it means | ||
| 354 | * the caller requests a full GT reset, i.e. we will reset all virtual | ||
| 355 | * GPU engines. For FLR, engine_mask is ignored. | ||
| 356 | */ | ||
| 357 | void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, | ||
| 358 | unsigned int engine_mask) | ||
| 359 | { | ||
| 360 | struct intel_gvt *gvt = vgpu->gvt; | ||
| 361 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | ||
| 362 | |||
| 363 | gvt_dbg_core("------------------------------------------\n"); | ||
| 364 | gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", | ||
| 365 | vgpu->id, dmlr, engine_mask); | ||
| 366 | vgpu->resetting = true; | ||
| 367 | |||
| 368 | intel_vgpu_stop_schedule(vgpu); | ||
| 369 | /* | ||
| 370 | * The current_vgpu will be set to NULL after stopping the | ||
| 371 | * scheduler when the reset is triggered by the current vgpu. | ||
| 372 | */ | ||
| 373 | if (scheduler->current_vgpu == NULL) { | ||
| 374 | mutex_unlock(&gvt->lock); | ||
| 375 | intel_gvt_wait_vgpu_idle(vgpu); | ||
| 376 | mutex_lock(&gvt->lock); | ||
| 377 | } | ||
| 378 | |||
| 379 | intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); | ||
| 380 | |||
| 381 | /* full GPU reset or device model level reset */ | ||
| 382 | if (engine_mask == ALL_ENGINES || dmlr) { | ||
| 383 | intel_vgpu_reset_gtt(vgpu, dmlr); | ||
| 384 | intel_vgpu_reset_resource(vgpu); | ||
| 385 | intel_vgpu_reset_mmio(vgpu); | ||
| 386 | populate_pvinfo_page(vgpu); | ||
| 387 | |||
| 388 | if (dmlr) | ||
| 389 | intel_vgpu_reset_cfg_space(vgpu); | ||
| 390 | } | ||
| 391 | |||
| 392 | vgpu->resetting = false; | ||
| 393 | gvt_dbg_core("reset vgpu%d done\n", vgpu->id); | ||
| 394 | gvt_dbg_core("------------------------------------------\n"); | ||
| 395 | } | ||
| 396 | |||
| 397 | /** | ||
| 398 | * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) | ||
| 402 | * @vgpu: virtual GPU | 399 | * @vgpu: virtual GPU |
| 403 | * | 400 | * |
| 404 | * This function is called when a user wants to reset a virtual GPU. | 401 | * This function is called when a user wants to reset a virtual GPU. |
| @@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, | |||
| 406 | */ | 403 | */ |
| 407 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) | 404 | void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) |
| 408 | { | 405 | { |
| 406 | mutex_lock(&vgpu->gvt->lock); | ||
| 407 | intel_gvt_reset_vgpu_locked(vgpu, true, 0); | ||
| 408 | mutex_unlock(&vgpu->gvt->lock); | ||
| 409 | } | 409 | } |
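The split above yields two entry points: intel_gvt_reset_vgpu() for callers that do not yet hold gvt->lock, and intel_gvt_reset_vgpu_locked() for paths already running under it. A minimal sketch of the two call shapes, assuming a valid vgpu pointer; the single-engine mask shown (1 << RCS) is illustrative and not taken from this patch:

	/* FLR-style reset: engine_mask is ignored when dmlr is true */
	intel_gvt_reset_vgpu(vgpu);	/* takes and releases gvt->lock itself */

	/* GT reset of one engine, from a path that already holds gvt->lock,
	 * e.g. a virtual GDRST register handler */
	intel_gvt_reset_vgpu_locked(vgpu, false, 1 << RCS);
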
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3dd7fc662859..4b23a7814713 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, | |||
| 595 | struct drm_i915_gem_pwrite *args, | 595 | struct drm_i915_gem_pwrite *args, |
| 596 | struct drm_file *file) | 596 | struct drm_file *file) |
| 597 | { | 597 | { |
| 598 | struct drm_device *dev = obj->base.dev; | ||
| 599 | void *vaddr = obj->phys_handle->vaddr + args->offset; | 598 | void *vaddr = obj->phys_handle->vaddr + args->offset; |
| 600 | char __user *user_data = u64_to_user_ptr(args->data_ptr); | 599 | char __user *user_data = u64_to_user_ptr(args->data_ptr); |
| 601 | int ret; | ||
| 602 | 600 | ||
| 603 | /* We manually control the domain here and pretend that it | 601 | /* We manually control the domain here and pretend that it |
| 604 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. | 602 | * remains coherent i.e. in the GTT domain, like shmem_pwrite. |
| 605 | */ | 603 | */ |
| 606 | lockdep_assert_held(&obj->base.dev->struct_mutex); | ||
| 607 | ret = i915_gem_object_wait(obj, | ||
| 608 | I915_WAIT_INTERRUPTIBLE | | ||
| 609 | I915_WAIT_LOCKED | | ||
| 610 | I915_WAIT_ALL, | ||
| 611 | MAX_SCHEDULE_TIMEOUT, | ||
| 612 | to_rps_client(file)); | ||
| 613 | if (ret) | ||
| 614 | return ret; | ||
| 615 | |||
| 616 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); | 604 | intel_fb_obj_invalidate(obj, ORIGIN_CPU); |
| 617 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { | 605 | if (copy_from_user(vaddr, user_data, args->size)) |
| 618 | unsigned long unwritten; | 606 | return -EFAULT; |
| 619 | |||
| 620 | /* The physical object once assigned is fixed for the lifetime | ||
| 621 | * of the obj, so we can safely drop the lock and continue | ||
| 622 | * to access vaddr. | ||
| 623 | */ | ||
| 624 | mutex_unlock(&dev->struct_mutex); | ||
| 625 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
| 626 | mutex_lock(&dev->struct_mutex); | ||
| 627 | if (unwritten) { | ||
| 628 | ret = -EFAULT; | ||
| 629 | goto out; | ||
| 630 | } | ||
| 631 | } | ||
| 632 | 607 | ||
| 633 | drm_clflush_virt_range(vaddr, args->size); | 608 | drm_clflush_virt_range(vaddr, args->size); |
| 634 | i915_gem_chipset_flush(to_i915(dev)); | 609 | i915_gem_chipset_flush(to_i915(obj->base.dev)); |
| 635 | 610 | ||
| 636 | out: | ||
| 637 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); | 611 | intel_fb_obj_flush(obj, false, ORIGIN_CPU); |
| 638 | return ret; | 612 | return 0; |
| 639 | } | 613 | } |
| 640 | 614 | ||
| 641 | void *i915_gem_object_alloc(struct drm_device *dev) | 615 | void *i915_gem_object_alloc(struct drm_device *dev) |
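With the explicit wait and the drop-and-retake dance around the faulting copy removed, the phys-object pwrite path reduces to a straight-line sequence. Restated as a sketch, using only names that appear in the patch:

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);	/* note the CPU write for FBC/PSR */
	if (copy_from_user(vaddr, user_data, args->size))	/* may fault and sleep */
		return -EFAULT;
	drm_clflush_virt_range(vaddr, args->size);	/* push CPU cache lines out */
	i915_gem_chipset_flush(to_i915(obj->base.dev));	/* flush chipset write buffers */
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return 0;
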
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814b015c..d534a316a16e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -199,6 +199,7 @@ found: | |||
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | /* Unbinding will emit any required flushes */ | 201 | /* Unbinding will emit any required flushes */ |
| 202 | ret = 0; | ||
| 202 | while (!list_empty(&eviction_list)) { | 203 | while (!list_empty(&eviction_list)) { |
| 203 | vma = list_first_entry(&eviction_list, | 204 | vma = list_first_entry(&eviction_list, |
| 204 | struct i915_vma, | 205 | struct i915_vma, |
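The added initialization guards against returning a stale error code: ret may still hold a value from the search phase above, and if every candidate ends up skipped the while loop runs zero times without ever overwriting it. A generic sketch of the idiom, with illustrative member and helper names:

	ret = 0;			/* the fix: don't inherit the scan's value */
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma, exec_list);
		ret = i915_vma_unbind(vma);	/* loop may never execute */
		if (ret)
			break;
	}
	return ret;			/* 0 when there was nothing to unbind */
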
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3dc8724df400..8d702cf1a616 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2967,6 +2967,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) | |||
| 2967 | unsigned int rotation = plane_state->base.rotation; | 2967 | unsigned int rotation = plane_state->base.rotation; |
| 2968 | int ret; | 2968 | int ret; |
| 2969 | 2969 | ||
| 2970 | if (!plane_state->base.visible) | ||
| 2971 | return 0; | ||
| 2972 | |||
| 2970 | /* Rotate src coordinates to match rotated GTT view */ | 2973 | /* Rotate src coordinates to match rotated GTT view */ |
| 2971 | if (drm_rotation_90_or_270(rotation)) | 2974 | if (drm_rotation_90_or_270(rotation)) |
| 2972 | drm_rect_rotate(&plane_state->base.src, | 2975 | drm_rect_rotate(&plane_state->base.src, |
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 3d546c019de0..b62e3f8ad415 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
| @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | |||
| 180 | 180 | ||
| 181 | /* Enable polling and queue hotplug re-enabling. */ | 181 | /* Enable polling and queue hotplug re-enabling. */ |
| 182 | if (hpd_disabled) { | 182 | if (hpd_disabled) { |
| 183 | drm_kms_helper_poll_enable_locked(dev); | 183 | drm_kms_helper_poll_enable(dev); |
| 184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, | 184 | mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, |
| 185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); | 185 | msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); |
| 186 | } | 186 | } |
| @@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) | |||
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | if (enabled) | 513 | if (enabled) |
| 514 | drm_kms_helper_poll_enable_locked(dev); | 514 | drm_kms_helper_poll_enable(dev); |
| 515 | 515 | ||
| 516 | mutex_unlock(&dev->mode_config.mutex); | 516 | mutex_unlock(&dev->mode_config.mutex); |
| 517 | 517 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d4961fa20c73..beabc17e7c8a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, | |||
| 979 | uint32_t *batch, | 979 | uint32_t *batch, |
| 980 | uint32_t index) | 980 | uint32_t index) |
| 981 | { | 981 | { |
| 982 | struct drm_i915_private *dev_priv = engine->i915; | ||
| 983 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); | 982 | uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); |
| 984 | 983 | ||
| 985 | /* | ||
| 986 | * WaDisableLSQCROPERFforOCL:kbl | ||
| 987 | * This WA is implemented in skl_init_clock_gating() but since | ||
| 988 | * this batch updates GEN8_L3SQCREG4 with default value we need to | ||
| 989 | * set this bit here to retain the WA during flush. | ||
| 990 | */ | ||
| 991 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) | ||
| 992 | l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; | ||
| 993 | |||
| 994 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | | 984 | wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | |
| 995 | MI_SRM_LRM_GLOBAL_GTT)); | 985 | MI_SRM_LRM_GLOBAL_GTT)); |
| 996 | wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); | 986 | wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index aeb637dc1fdf..91cb4c422ad5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1095 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | 1095 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
| 1096 | HDC_FENCE_DEST_SLM_DISABLE); | 1096 | HDC_FENCE_DEST_SLM_DISABLE); |
| 1097 | 1097 | ||
| 1098 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
| 1099 | * involving this register should also be added to WA batch as required. | ||
| 1100 | */ | ||
| 1101 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0)) | ||
| 1102 | /* WaDisableLSQCROPERFforOCL:kbl */ | ||
| 1103 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
| 1104 | GEN8_LQSC_RO_PERF_DIS); | ||
| 1105 | |||
| 1106 | /* WaToEnableHwFixForPushConstHWBug:kbl */ | 1098 | /* WaToEnableHwFixForPushConstHWBug:kbl */ |
| 1107 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) | 1099 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) |
| 1108 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1100 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 14ff87686a36..686a580c711a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 345 | { | 345 | { |
| 346 | struct adreno_platform_config *config = pdev->dev.platform_data; | 346 | struct adreno_platform_config *config = pdev->dev.platform_data; |
| 347 | struct msm_gpu *gpu = &adreno_gpu->base; | 347 | struct msm_gpu *gpu = &adreno_gpu->base; |
| 348 | struct msm_mmu *mmu; | ||
| 349 | int ret; | 348 | int ret; |
| 350 | 349 | ||
| 351 | adreno_gpu->funcs = funcs; | 350 | adreno_gpu->funcs = funcs; |
| @@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 385 | return ret; | 384 | return ret; |
| 386 | } | 385 | } |
| 387 | 386 | ||
| 388 | mmu = gpu->aspace->mmu; | 387 | if (gpu->aspace && gpu->aspace->mmu) { |
| 389 | if (mmu) { | 388 | struct msm_mmu *mmu = gpu->aspace->mmu; |
| 390 | ret = mmu->funcs->attach(mmu, iommu_ports, | 389 | ret = mmu->funcs->attach(mmu, iommu_ports, |
| 391 | ARRAY_SIZE(iommu_ports)); | 390 | ARRAY_SIZE(iommu_ports)); |
| 392 | if (ret) | 391 | if (ret) |
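The reworked condition also tolerates gpu->aspace itself being NULL, which the old code dereferenced unconditionally; on targets without an IOMMU the address space is never created. The defensive shape, as in the patch:

	if (gpu->aspace && gpu->aspace->mmu) {
		struct msm_mmu *mmu = gpu->aspace->mmu;

		ret = mmu->funcs->attach(mmu, iommu_ports,
					 ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}
	/* else: no IOMMU to attach; proceed without a per-GPU MMU */
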
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 5f6cd8745dbc..c396d459a9d0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | |||
| @@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st | |||
| 119 | 119 | ||
| 120 | static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) | 120 | static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) |
| 121 | { | 121 | { |
| 122 | int i; | ||
| 123 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); | 122 | struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); |
| 124 | struct drm_plane *plane; | ||
| 125 | struct drm_plane_state *plane_state; | ||
| 126 | |||
| 127 | for_each_plane_in_state(state, plane, plane_state, i) | ||
| 128 | mdp5_plane_complete_commit(plane, plane_state); | ||
| 129 | 123 | ||
| 130 | if (mdp5_kms->smp) | 124 | if (mdp5_kms->smp) |
| 131 | mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); | 125 | mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp); |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 17b0cc101171..cdfc63d90c7b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | |||
| @@ -104,8 +104,6 @@ struct mdp5_plane_state { | |||
| 104 | 104 | ||
| 105 | /* assigned by crtc blender */ | 105 | /* assigned by crtc blender */ |
| 106 | enum mdp_mixer_stage_id stage; | 106 | enum mdp_mixer_stage_id stage; |
| 107 | |||
| 108 | bool pending : 1; | ||
| 109 | }; | 107 | }; |
| 110 | #define to_mdp5_plane_state(x) \ | 108 | #define to_mdp5_plane_state(x) \ |
| 111 | container_of(x, struct mdp5_plane_state, base) | 109 | container_of(x, struct mdp5_plane_state, base) |
| @@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms); | |||
| 232 | void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); | 230 | void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); |
| 233 | 231 | ||
| 234 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); | 232 | uint32_t mdp5_plane_get_flush(struct drm_plane *plane); |
| 235 | void mdp5_plane_complete_commit(struct drm_plane *plane, | ||
| 236 | struct drm_plane_state *state); | ||
| 237 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); | 233 | enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); |
| 238 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); | 234 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); |
| 239 | 235 | ||
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index c099da7bc212..25d9d0a97156 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
| @@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p, | |||
| 179 | drm_printf(p, "\tzpos=%u\n", pstate->zpos); | 179 | drm_printf(p, "\tzpos=%u\n", pstate->zpos); |
| 180 | drm_printf(p, "\talpha=%u\n", pstate->alpha); | 180 | drm_printf(p, "\talpha=%u\n", pstate->alpha); |
| 181 | drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); | 181 | drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage)); |
| 182 | drm_printf(p, "\tpending=%u\n", pstate->pending); | ||
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | static void mdp5_plane_reset(struct drm_plane *plane) | 184 | static void mdp5_plane_reset(struct drm_plane *plane) |
| @@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) | |||
| 220 | if (mdp5_state && mdp5_state->base.fb) | 219 | if (mdp5_state && mdp5_state->base.fb) |
| 221 | drm_framebuffer_reference(mdp5_state->base.fb); | 220 | drm_framebuffer_reference(mdp5_state->base.fb); |
| 222 | 221 | ||
| 223 | mdp5_state->pending = false; | ||
| 224 | |||
| 225 | return &mdp5_state->base; | 222 | return &mdp5_state->base; |
| 226 | } | 223 | } |
| 227 | 224 | ||
| @@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, | |||
| 288 | DBG("%s: check (%d -> %d)", plane->name, | 285 | DBG("%s: check (%d -> %d)", plane->name, |
| 289 | plane_enabled(old_state), plane_enabled(state)); | 286 | plane_enabled(old_state), plane_enabled(state)); |
| 290 | 287 | ||
| 291 | /* We don't allow faster-than-vblank updates.. if we did add this | ||
| 292 | * some day, we would need to disallow in cases where hwpipe | ||
| 293 | * changes | ||
| 294 | */ | ||
| 295 | if (WARN_ON(to_mdp5_plane_state(old_state)->pending)) | ||
| 296 | return -EBUSY; | ||
| 297 | |||
| 298 | max_width = config->hw->lm.max_width << 16; | 288 | max_width = config->hw->lm.max_width << 16; |
| 299 | max_height = config->hw->lm.max_height << 16; | 289 | max_height = config->hw->lm.max_height << 16; |
| 300 | 290 | ||
| @@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, | |||
| 370 | struct drm_plane_state *old_state) | 360 | struct drm_plane_state *old_state) |
| 371 | { | 361 | { |
| 372 | struct drm_plane_state *state = plane->state; | 362 | struct drm_plane_state *state = plane->state; |
| 373 | struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); | ||
| 374 | 363 | ||
| 375 | DBG("%s: update", plane->name); | 364 | DBG("%s: update", plane->name); |
| 376 | 365 | ||
| 377 | mdp5_state->pending = true; | ||
| 378 | |||
| 379 | if (plane_enabled(state)) { | 366 | if (plane_enabled(state)) { |
| 380 | int ret; | 367 | int ret; |
| 381 | 368 | ||
| @@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) | |||
| 851 | return pstate->hwpipe->flush_mask; | 838 | return pstate->hwpipe->flush_mask; |
| 852 | } | 839 | } |
| 853 | 840 | ||
| 854 | /* called after vsync in thread context */ | ||
| 855 | void mdp5_plane_complete_commit(struct drm_plane *plane, | ||
| 856 | struct drm_plane_state *state) | ||
| 857 | { | ||
| 858 | struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); | ||
| 859 | |||
| 860 | pstate->pending = false; | ||
| 861 | } | ||
| 862 | |||
| 863 | /* initialize plane */ | 841 | /* initialize plane */ |
| 864 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) | 842 | struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) |
| 865 | { | 843 | { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index d8bc59c7e261..8098677a3916 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj) | |||
| 294 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | 294 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
| 295 | 295 | ||
| 296 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { | 296 | for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { |
| 297 | if (!priv->aspace[id]) | ||
| 298 | continue; | ||
| 297 | msm_gem_unmap_vma(priv->aspace[id], | 299 | msm_gem_unmap_vma(priv->aspace[id], |
| 298 | &msm_obj->domain[id], msm_obj->sgt); | 300 | &msm_obj->domain[id], msm_obj->sgt); |
| 299 | } | 301 | } |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index e8a38d296855..414776811e71 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
| @@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin"); | |||
| 114 | MODULE_FIRMWARE("radeon/hainan_rlc.bin"); | 114 | MODULE_FIRMWARE("radeon/hainan_rlc.bin"); |
| 115 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); | 115 | MODULE_FIRMWARE("radeon/hainan_smc.bin"); |
| 116 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); | 116 | MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); |
| 117 | MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); | ||
| 118 | |||
| 119 | MODULE_FIRMWARE("radeon/si58_mc.bin"); | ||
| 117 | 120 | ||
| 118 | static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); | 121 | static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); |
| 119 | static void si_pcie_gen3_enable(struct radeon_device *rdev); | 122 | static void si_pcie_gen3_enable(struct radeon_device *rdev); |
| @@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1650 | int err; | 1653 | int err; |
| 1651 | int new_fw = 0; | 1654 | int new_fw = 0; |
| 1652 | bool new_smc = false; | 1655 | bool new_smc = false; |
| 1656 | bool si58_fw = false; | ||
| 1657 | bool banks2_fw = false; | ||
| 1653 | 1658 | ||
| 1654 | DRM_DEBUG("\n"); | 1659 | DRM_DEBUG("\n"); |
| 1655 | 1660 | ||
| @@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1727 | ((rdev->pdev->device == 0x6660) || | 1732 | ((rdev->pdev->device == 0x6660) || |
| 1728 | (rdev->pdev->device == 0x6663) || | 1733 | (rdev->pdev->device == 0x6663) || |
| 1729 | (rdev->pdev->device == 0x6665) || | 1734 | (rdev->pdev->device == 0x6665) || |
| 1730 | (rdev->pdev->device == 0x6667))) || | 1735 | (rdev->pdev->device == 0x6667)))) |
| 1731 | ((rdev->pdev->revision == 0xc3) && | ||
| 1732 | (rdev->pdev->device == 0x6665))) | ||
| 1733 | new_smc = true; | 1736 | new_smc = true; |
| 1737 | else if ((rdev->pdev->revision == 0xc3) && | ||
| 1738 | (rdev->pdev->device == 0x6665)) | ||
| 1739 | banks2_fw = true; | ||
| 1734 | new_chip_name = "hainan"; | 1740 | new_chip_name = "hainan"; |
| 1735 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; | 1741 | pfp_req_size = SI_PFP_UCODE_SIZE * 4; |
| 1736 | me_req_size = SI_PM4_UCODE_SIZE * 4; | 1742 | me_req_size = SI_PM4_UCODE_SIZE * 4; |
| @@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1742 | default: BUG(); | 1748 | default: BUG(); |
| 1743 | } | 1749 | } |
| 1744 | 1750 | ||
| 1751 | /* this memory configuration requires special firmware */ | ||
| 1752 | if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) | ||
| 1753 | si58_fw = true; | ||
| 1754 | |||
| 1745 | DRM_INFO("Loading %s Microcode\n", new_chip_name); | 1755 | DRM_INFO("Loading %s Microcode\n", new_chip_name); |
| 1746 | 1756 | ||
| 1747 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); | 1757 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); |
| @@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1845 | } | 1855 | } |
| 1846 | } | 1856 | } |
| 1847 | 1857 | ||
| 1848 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); | 1858 | if (si58_fw) |
| 1859 | snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); | ||
| 1860 | else | ||
| 1861 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); | ||
| 1849 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); | 1862 | err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); |
| 1850 | if (err) { | 1863 | if (err) { |
| 1851 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); | 1864 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); |
| @@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
| 1876 | } | 1889 | } |
| 1877 | } | 1890 | } |
| 1878 | 1891 | ||
| 1879 | if (new_smc) | 1892 | if (banks2_fw) |
| 1893 | snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin"); | ||
| 1894 | else if (new_smc) | ||
| 1880 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); | 1895 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); |
| 1881 | else | 1896 | else |
| 1882 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); | 1897 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); |
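Taken together, si_init_microcode() now picks firmware from three inputs: the chip name, an SMC quirk keyed off PCI revision/device IDs, and an MC quirk keyed off the memory configuration read from MC_SEQ_MISC0. A condensed sketch of the selection logic after this patch; the request_firmware() error handling is omitted and the Hainan-only gating around the SMC quirk is collapsed here:

	/* one Hainan SKU needs its own SMC image */
	banks2_fw = (rdev->pdev->revision == 0xc3 &&
		     rdev->pdev->device == 0x6665);

	/* memory configuration 0x58 needs its own MC image */
	si58_fw = (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58);

	if (si58_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin",
			 new_chip_name);

	if (banks2_fw)
		snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
	else if (new_smc)
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin",
			 new_chip_name);
	else
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin",
			 new_chip_name);
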
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 13ba73fd9b68..2944916f7102 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 3008 | (rdev->pdev->device == 0x6817) || | 3008 | (rdev->pdev->device == 0x6817) || |
| 3009 | (rdev->pdev->device == 0x6806)) | 3009 | (rdev->pdev->device == 0x6806)) |
| 3010 | max_mclk = 120000; | 3010 | max_mclk = 120000; |
| 3011 | } else if (rdev->family == CHIP_OLAND) { | ||
| 3012 | if ((rdev->pdev->revision == 0xC7) || | ||
| 3013 | (rdev->pdev->revision == 0x80) || | ||
| 3014 | (rdev->pdev->revision == 0x81) || | ||
| 3015 | (rdev->pdev->revision == 0x83) || | ||
| 3016 | (rdev->pdev->revision == 0x87) || | ||
| 3017 | (rdev->pdev->device == 0x6604) || | ||
| 3018 | (rdev->pdev->device == 0x6605)) { | ||
| 3019 | max_sclk = 75000; | ||
| 3020 | max_mclk = 80000; | ||
| 3021 | } | ||
| 3022 | } else if (rdev->family == CHIP_HAINAN) { | 3011 | } else if (rdev->family == CHIP_HAINAN) { |
| 3023 | if ((rdev->pdev->revision == 0x81) || | 3012 | if ((rdev->pdev->revision == 0x81) || |
| 3024 | (rdev->pdev->revision == 0x83) || | 3013 | (rdev->pdev->revision == 0x83) || |
| @@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 3027 | (rdev->pdev->device == 0x6665) || | 3016 | (rdev->pdev->device == 0x6665) || |
| 3028 | (rdev->pdev->device == 0x6667)) { | 3017 | (rdev->pdev->device == 0x6667)) { |
| 3029 | max_sclk = 75000; | 3018 | max_sclk = 75000; |
| 3030 | max_mclk = 80000; | ||
| 3031 | } | 3019 | } |
| 3032 | } | 3020 | } |
| 3033 | /* Apply dpm quirks */ | 3021 | /* Apply dpm quirks */ |
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index dd21f950e129..cde9f3758106 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c | |||
| @@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, | |||
| 331 | info->fbops = &virtio_gpufb_ops; | 331 | info->fbops = &virtio_gpufb_ops; |
| 332 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | 332 | info->pixmap.flags = FB_PIXMAP_SYSTEM; |
| 333 | 333 | ||
| 334 | info->screen_base = obj->vmap; | 334 | info->screen_buffer = obj->vmap; |
| 335 | info->screen_size = obj->gem_base.size; | 335 | info->screen_size = obj->gem_base.size; |
| 336 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | 336 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); |
| 337 | drm_fb_helper_fill_var(info, &vfbdev->helper, | 337 | drm_fb_helper_fill_var(info, &vfbdev->helper, |
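The s/screen_base/screen_buffer/ change matters for address-space correctness: fb_info keeps the two pointers in a union with different annotations, and virtio-gpu's vmap lives in ordinary kernel memory rather than ioremapped VRAM, so screen_buffer is the correctly annotated member. From include/linux/fb.h (paraphrased):

	union {
		char __iomem *screen_base;	/* I/O-remapped framebuffer */
		char *screen_buffer;		/* framebuffer in system RAM */
	};
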
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 982c299e435a..d026f5017c33 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h | |||
| @@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); | |||
| 73 | 73 | ||
| 74 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); | 74 | extern void drm_kms_helper_poll_disable(struct drm_device *dev); |
| 75 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); | 75 | extern void drm_kms_helper_poll_enable(struct drm_device *dev); |
| 76 | extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); | ||
| 77 | 76 | ||
| 78 | #endif | 77 | #endif |
