diff options
author | Dave Airlie <airlied@redhat.com> | 2017-05-12 00:25:22 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2017-05-12 00:25:22 -0400 |
commit | bb57d0411a41f525c88f0ca3cc29936e0b879a1b (patch) | |
tree | 85bafb42057d7c905b9e342cf576cd5c8e59dcc6 /drivers | |
parent | 7ec27233e62b5efe795563896577de5340dc7473 (diff) | |
parent | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 (diff) |
Merge branch 'linux-4.12' of git://github.com/skeggsb/linux into drm-next
Quite a few patches, but not much code changed:
- Fixes regression from atomic when only the source rect of a plane
changes (i.e. xrandr --right-of)
- Fixes another issue where atomic changed behaviour underneath us,
potentially causing laggy cursor position updates
- Fixes for a bunch of races in thermal code, which lead to random
lockups for a lot of users
* 'linux-4.12' of git://github.com/skeggsb/linux:
drm/nouveau/therm: remove ineffective workarounds for alarm bugs
drm/nouveau/tmr: avoid processing completed alarms when adding a new one
drm/nouveau/tmr: fix corruption of the pending list when rescheduling an alarm
drm/nouveau/tmr: handle races with hw when updating the next alarm time
drm/nouveau/tmr: ack interrupt before processing alarms
drm/nouveau/core: fix static checker warning
drm/nouveau/fb/ram/gf100-: remove 0x10f200 read
drm/nouveau/kms/nv50: skip core channel cursor update on position-only changes
drm/nouveau/kms/nv50: fix source-rect-only plane updates
drm/nouveau/kms/nv50: remove pointless argument to window atomic_check_acquire()
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/nouveau/nv50_display.c | 29 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/object.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 1 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 59 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | 2 |
9 files changed, 60 insertions, 41 deletions
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 0e58537352fe..a7663249b3ba 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -831,8 +831,7 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw, | |||
831 | static int | 831 | static int |
832 | nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, | 832 | nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, |
833 | struct nv50_wndw_atom *asyw, | 833 | struct nv50_wndw_atom *asyw, |
834 | struct nv50_head_atom *asyh, | 834 | struct nv50_head_atom *asyh) |
835 | u32 pflip_flags) | ||
836 | { | 835 | { |
837 | struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb); | 836 | struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb); |
838 | struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); | 837 | struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev); |
@@ -848,7 +847,10 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, | |||
848 | asyw->image.h = fb->base.height; | 847 | asyw->image.h = fb->base.height; |
849 | asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8; | 848 | asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8; |
850 | 849 | ||
851 | asyw->interval = pflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 0 : 1; | 850 | if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) |
851 | asyw->interval = 0; | ||
852 | else | ||
853 | asyw->interval = 1; | ||
852 | 854 | ||
853 | if (asyw->image.kind) { | 855 | if (asyw->image.kind) { |
854 | asyw->image.layout = 0; | 856 | asyw->image.layout = 0; |
@@ -887,7 +889,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) | |||
887 | struct nv50_head_atom *harm = NULL, *asyh = NULL; | 889 | struct nv50_head_atom *harm = NULL, *asyh = NULL; |
888 | bool varm = false, asyv = false, asym = false; | 890 | bool varm = false, asyv = false, asym = false; |
889 | int ret; | 891 | int ret; |
890 | u32 pflip_flags = 0; | ||
891 | 892 | ||
892 | NV_ATOMIC(drm, "%s atomic_check\n", plane->name); | 893 | NV_ATOMIC(drm, "%s atomic_check\n", plane->name); |
893 | if (asyw->state.crtc) { | 894 | if (asyw->state.crtc) { |
@@ -896,7 +897,6 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) | |||
896 | return PTR_ERR(asyh); | 897 | return PTR_ERR(asyh); |
897 | asym = drm_atomic_crtc_needs_modeset(&asyh->state); | 898 | asym = drm_atomic_crtc_needs_modeset(&asyh->state); |
898 | asyv = asyh->state.active; | 899 | asyv = asyh->state.active; |
899 | pflip_flags = asyh->state.pageflip_flags; | ||
900 | } | 900 | } |
901 | 901 | ||
902 | if (armw->state.crtc) { | 902 | if (armw->state.crtc) { |
@@ -912,12 +912,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) | |||
912 | if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point))) | 912 | if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point))) |
913 | asyw->set.point = true; | 913 | asyw->set.point = true; |
914 | 914 | ||
915 | if (!varm || asym || armw->state.fb != asyw->state.fb) { | 915 | ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh); |
916 | ret = nv50_wndw_atomic_check_acquire( | 916 | if (ret) |
917 | wndw, asyw, asyh, pflip_flags); | 917 | return ret; |
918 | if (ret) | ||
919 | return ret; | ||
920 | } | ||
921 | } else | 918 | } else |
922 | if (varm) { | 919 | if (varm) { |
923 | nv50_wndw_atomic_check_release(wndw, asyw, harm); | 920 | nv50_wndw_atomic_check_release(wndw, asyw, harm); |
@@ -1122,9 +1119,13 @@ static void | |||
1122 | nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh, | 1119 | nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh, |
1123 | struct nv50_wndw_atom *asyw) | 1120 | struct nv50_wndw_atom *asyw) |
1124 | { | 1121 | { |
1125 | asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle; | 1122 | u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle; |
1126 | asyh->curs.offset = asyw->image.offset; | 1123 | u32 offset = asyw->image.offset; |
1127 | asyh->set.curs = asyh->curs.visible; | 1124 | if (asyh->curs.handle != handle || asyh->curs.offset != offset) { |
1125 | asyh->curs.handle = handle; | ||
1126 | asyh->curs.offset = offset; | ||
1127 | asyh->set.curs = asyh->curs.visible; | ||
1128 | } | ||
1128 | } | 1129 | } |
1129 | 1130 | ||
1130 | static void | 1131 | static void |
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c index 89d2e9da11c7..acd76fd4f6d8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/object.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c | |||
@@ -295,7 +295,7 @@ nvkm_object_ctor(const struct nvkm_object_func *func, | |||
295 | INIT_LIST_HEAD(&object->head); | 295 | INIT_LIST_HEAD(&object->head); |
296 | INIT_LIST_HEAD(&object->tree); | 296 | INIT_LIST_HEAD(&object->tree); |
297 | RB_CLEAR_NODE(&object->node); | 297 | RB_CLEAR_NODE(&object->node); |
298 | WARN_ON(oclass->engine && !object->engine); | 298 | WARN_ON(IS_ERR(object->engine)); |
299 | } | 299 | } |
300 | 300 | ||
301 | int | 301 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c index c63975907c90..4a9bd4f1cb93 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | |||
@@ -638,7 +638,6 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb, | |||
638 | return ret; | 638 | return ret; |
639 | } | 639 | } |
640 | 640 | ||
641 | ram->ranks = (nvkm_rd32(device, 0x10f200) & 0x00000004) ? 2 : 1; | ||
642 | return 0; | 641 | return 0; |
643 | } | 642 | } |
644 | 643 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c index df949fa7d05d..be691a7b972f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | |||
@@ -146,7 +146,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) | |||
146 | poll = false; | 146 | poll = false; |
147 | } | 147 | } |
148 | 148 | ||
149 | if (list_empty(&therm->alarm.head) && poll) | 149 | if (poll) |
150 | nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm); | 150 | nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm); |
151 | spin_unlock_irqrestore(&therm->lock, flags); | 151 | spin_unlock_irqrestore(&therm->lock, flags); |
152 | 152 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c index 91198d79393a..e2feccec25f5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c | |||
@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target) | |||
83 | spin_unlock_irqrestore(&fan->lock, flags); | 83 | spin_unlock_irqrestore(&fan->lock, flags); |
84 | 84 | ||
85 | /* schedule next fan update, if not at target speed already */ | 85 | /* schedule next fan update, if not at target speed already */ |
86 | if (list_empty(&fan->alarm.head) && target != duty) { | 86 | if (target != duty) { |
87 | u16 bump_period = fan->bios.bump_period; | 87 | u16 bump_period = fan->bios.bump_period; |
88 | u16 slow_down_period = fan->bios.slow_down_period; | 88 | u16 slow_down_period = fan->bios.slow_down_period; |
89 | u64 delay; | 89 | u64 delay; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c index 59701b7a6597..ff9fbe7950e5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | |||
@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent) | |||
53 | duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff); | 53 | duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff); |
54 | nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); | 54 | nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); |
55 | 55 | ||
56 | if (list_empty(&fan->alarm.head) && percent != (duty * 100)) { | 56 | if (percent != (duty * 100)) { |
57 | u64 next_change = (percent * fan->period_us) / 100; | 57 | u64 next_change = (percent * fan->period_us) / 100; |
58 | if (!duty) | 58 | if (!duty) |
59 | next_change = fan->period_us - next_change; | 59 | next_change = fan->period_us - next_change; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c index b9703c02d8ca..9a79e91fdfdc 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | |||
@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm) | |||
185 | spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags); | 185 | spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags); |
186 | 186 | ||
187 | /* schedule the next poll in one second */ | 187 | /* schedule the next poll in one second */ |
188 | if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head)) | 188 | if (therm->func->temp_get(therm) >= 0) |
189 | nvkm_timer_alarm(tmr, 1000000000ULL, alarm); | 189 | nvkm_timer_alarm(tmr, 1000000000ULL, alarm); |
190 | } | 190 | } |
191 | 191 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c index 07dc82bfe346..f2a86eae0a0d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | |||
@@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) | |||
36 | unsigned long flags; | 36 | unsigned long flags; |
37 | LIST_HEAD(exec); | 37 | LIST_HEAD(exec); |
38 | 38 | ||
39 | /* move any due alarms off the pending list */ | 39 | /* Process pending alarms. */ |
40 | spin_lock_irqsave(&tmr->lock, flags); | 40 | spin_lock_irqsave(&tmr->lock, flags); |
41 | list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { | 41 | list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { |
42 | if (alarm->timestamp <= nvkm_timer_read(tmr)) | 42 | /* Have we hit the earliest alarm that hasn't gone off? */ |
43 | list_move_tail(&alarm->head, &exec); | 43 | if (alarm->timestamp > nvkm_timer_read(tmr)) { |
44 | /* Schedule it. If we didn't race, we're done. */ | ||
45 | tmr->func->alarm_init(tmr, alarm->timestamp); | ||
46 | if (alarm->timestamp > nvkm_timer_read(tmr)) | ||
47 | break; | ||
48 | } | ||
49 | |||
50 | /* Move to completed list. We'll drop the lock before | ||
51 | * executing the callback so it can reschedule itself. | ||
52 | */ | ||
53 | list_move_tail(&alarm->head, &exec); | ||
44 | } | 54 | } |
45 | 55 | ||
46 | /* reschedule interrupt for next alarm time */ | 56 | /* Shut down interrupt if no more pending alarms. */ |
47 | if (!list_empty(&tmr->alarms)) { | 57 | if (list_empty(&tmr->alarms)) |
48 | alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head); | ||
49 | tmr->func->alarm_init(tmr, alarm->timestamp); | ||
50 | } else { | ||
51 | tmr->func->alarm_fini(tmr); | 58 | tmr->func->alarm_fini(tmr); |
52 | } | ||
53 | spin_unlock_irqrestore(&tmr->lock, flags); | 59 | spin_unlock_irqrestore(&tmr->lock, flags); |
54 | 60 | ||
55 | /* execute any pending alarm handlers */ | 61 | /* Execute completed callbacks. */ |
56 | list_for_each_entry_safe(alarm, atemp, &exec, head) { | 62 | list_for_each_entry_safe(alarm, atemp, &exec, head) { |
57 | list_del_init(&alarm->head); | 63 | list_del_init(&alarm->head); |
58 | alarm->func(alarm); | 64 | alarm->func(alarm); |
@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm) | |||
65 | struct nvkm_alarm *list; | 71 | struct nvkm_alarm *list; |
66 | unsigned long flags; | 72 | unsigned long flags; |
67 | 73 | ||
68 | alarm->timestamp = nvkm_timer_read(tmr) + nsec; | 74 | /* Remove alarm from pending list. |
69 | 75 | * | |
70 | /* append new alarm to list, in soonest-alarm-first order */ | 76 | * This both protects against the corruption of the list, |
77 | * and implements alarm rescheduling/cancellation. | ||
78 | */ | ||
71 | spin_lock_irqsave(&tmr->lock, flags); | 79 | spin_lock_irqsave(&tmr->lock, flags); |
72 | if (!nsec) { | 80 | list_del_init(&alarm->head); |
73 | if (!list_empty(&alarm->head)) | 81 | |
74 | list_del(&alarm->head); | 82 | if (nsec) { |
75 | } else { | 83 | /* Insert into pending list, ordered earliest to latest. */ |
84 | alarm->timestamp = nvkm_timer_read(tmr) + nsec; | ||
76 | list_for_each_entry(list, &tmr->alarms, head) { | 85 | list_for_each_entry(list, &tmr->alarms, head) { |
77 | if (list->timestamp > alarm->timestamp) | 86 | if (list->timestamp > alarm->timestamp) |
78 | break; | 87 | break; |
79 | } | 88 | } |
89 | |||
80 | list_add_tail(&alarm->head, &list->head); | 90 | list_add_tail(&alarm->head, &list->head); |
91 | |||
92 | /* Update HW if this is now the earliest alarm. */ | ||
93 | list = list_first_entry(&tmr->alarms, typeof(*list), head); | ||
94 | if (list == alarm) { | ||
95 | tmr->func->alarm_init(tmr, alarm->timestamp); | ||
96 | /* This shouldn't happen if callers aren't stupid. | ||
97 | * | ||
98 | * Worst case scenario is that it'll take roughly | ||
99 | * 4 seconds for the next alarm to trigger. | ||
100 | */ | ||
101 | WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr)); | ||
102 | } | ||
81 | } | 103 | } |
82 | spin_unlock_irqrestore(&tmr->lock, flags); | 104 | spin_unlock_irqrestore(&tmr->lock, flags); |
83 | |||
84 | /* process pending alarms */ | ||
85 | nvkm_timer_alarm_trigger(tmr); | ||
86 | } | 105 | } |
87 | 106 | ||
88 | void | 107 | void |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c index 7b9ce87f0617..7f48249f41de 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | |||
@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr) | |||
76 | u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); | 76 | u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); |
77 | 77 | ||
78 | if (stat & 0x00000001) { | 78 | if (stat & 0x00000001) { |
79 | nvkm_timer_alarm_trigger(tmr); | ||
80 | nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); | 79 | nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); |
80 | nvkm_timer_alarm_trigger(tmr); | ||
81 | stat &= ~0x00000001; | 81 | stat &= ~0x00000001; |
82 | } | 82 | } |
83 | 83 | ||