author     Dave Airlie <airlied@redhat.com>  2015-12-07 03:17:09 -0500
committer  Dave Airlie <airlied@redhat.com>  2015-12-07 03:17:09 -0500
commit     47c0fd72822159eb501411f975f5672a0bf7a7fb
tree       a7350e9bd6cf2c6d7295b3f61d7c85053f2ad199
parent     80d69009ef67d0753c1c30c62056a04275898531
parent     4e15f2a1a056ff2695611c3e8d0b162526e84355
Merge tag 'topic/drm-misc-2015-12-04' of git://anongit.freedesktop.org/drm-intel into drm-next
New -misc pull. The big thing is Thierry's atomic helpers for system
suspend/resume, which I'd like to use in i915 too. Hence the pull.
* tag 'topic/drm-misc-2015-12-04' of git://anongit.freedesktop.org/drm-intel:
drm: keep connector status change logging human readable
drm/atomic-helper: Reject attempts at re-stealing encoders
drm/atomic-helper: Implement subsystem-level suspend/resume
drm: Implement drm_modeset_lock_all_ctx()
drm/gma500: Add driver private mutex for the fault handler
drm/gma500: Drop dev->struct_mutex from mmap offset function
drm/gma500: Drop dev->struct_mutex from fbdev init/teardown code
drm/gma500: Drop dev->struct_mutex from modeset code
drm/gma500: Use correct unref in the gem bo create function
drm/edid: Make the detailed timing CEA/HDMI mode fixup accept up to 5kHz clock difference
drm/atomic_helper: Add drm_atomic_helper_disable_planes_on_crtc()
drm: Serialise multiple event readers
drm: Drop dev->event_lock spinlock around faulting copy_to_user()
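
The headline change is the pair of subsystem-level atomic suspend/resume helpers added to drm_atomic_helper.c in the diff below. As a minimal sketch of how a driver might wire them into its PM callbacks — the foo_* names, the foo_drm_private struct and its suspend_state field are illustrative assumptions, not part of this pull:

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

struct foo_drm_private {
	struct drm_atomic_state *suspend_state;	/* saved across suspend */
	/* ... */
};

static int foo_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct foo_drm_private *priv = ddev->dev_private;

	/* stop connector polling so nothing races the suspend commit */
	drm_kms_helper_poll_disable(ddev);

	/* disables all outputs and hands back the pre-suspend state */
	priv->suspend_state = drm_atomic_helper_suspend(ddev);
	if (IS_ERR(priv->suspend_state)) {
		drm_kms_helper_poll_enable(ddev);
		return PTR_ERR(priv->suspend_state);
	}

	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct foo_drm_private *priv = ddev->dev_private;
	int ret;

	/* re-commits the configuration saved in foo_pm_suspend() */
	ret = drm_atomic_helper_resume(ddev, priv->suspend_state);
	drm_kms_helper_poll_enable(ddev);

	return ret;
}

Note that drm_atomic_helper_suspend() treats the duplicated state as persistent, so a real driver also has to quiesce fbdev emulation before calling it (see the kerneldoc added below).
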
-rw-r--r--  drivers/gpu/drm/drm_atomic.c          |   7
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c   | 233
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c     |   6
-rw-r--r--  drivers/gpu/drm/drm_edid.c            |  62
-rw-r--r--  drivers/gpu/drm/drm_fops.c            |  58
-rw-r--r--  drivers/gpu/drm/drm_modes.c           |  19
-rw-r--r--  drivers/gpu/drm/drm_modeset_lock.c    |  89
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c    |   5
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c  |  12
-rw-r--r--  drivers/gpu/drm/gma500/gem.c          |  19
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c  |  13
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c          |   1
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h      |   2
-rw-r--r--  include/drm/drmP.h                    |   2
-rw-r--r--  include/drm/drm_atomic_helper.h       |   8
-rw-r--r--  include/drm/drm_modes.h               |   2
-rw-r--r--  include/drm/drm_modeset_lock.h        |   4
17 files changed, 445 insertions, 97 deletions
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 55b4debad79b..ef5f7663a718 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1188,12 +1188,7 @@ void drm_atomic_legacy_backoff(struct drm_atomic_state *state) | |||
1188 | retry: | 1188 | retry: |
1189 | drm_modeset_backoff(state->acquire_ctx); | 1189 | drm_modeset_backoff(state->acquire_ctx); |
1190 | 1190 | ||
1191 | ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, | 1191 | ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); |
1192 | state->acquire_ctx); | ||
1193 | if (ret) | ||
1194 | goto retry; | ||
1195 | ret = drm_modeset_lock_all_crtcs(state->dev, | ||
1196 | state->acquire_ctx); | ||
1197 | if (ret) | 1192 | if (ret) |
1198 | goto retry; | 1193 | goto retry; |
1199 | } | 1194 | } |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 3731a26979bc..74a5fc4deef6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -80,6 +80,27 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, | |||
80 | } | 80 | } |
81 | } | 81 | } |
82 | 82 | ||
83 | static bool | ||
84 | check_pending_encoder_assignment(struct drm_atomic_state *state, | ||
85 | struct drm_encoder *new_encoder, | ||
86 | struct drm_connector *new_connector) | ||
87 | { | ||
88 | struct drm_connector *connector; | ||
89 | struct drm_connector_state *conn_state; | ||
90 | int i; | ||
91 | |||
92 | for_each_connector_in_state(state, connector, conn_state, i) { | ||
93 | if (conn_state->best_encoder != new_encoder) | ||
94 | continue; | ||
95 | |||
96 | /* encoder already assigned and we're trying to re-steal it! */ | ||
97 | if (connector->state->best_encoder != conn_state->best_encoder) | ||
98 | return false; | ||
99 | } | ||
100 | |||
101 | return true; | ||
102 | } | ||
103 | |||
83 | static struct drm_crtc * | 104 | static struct drm_crtc * |
84 | get_current_crtc_for_encoder(struct drm_device *dev, | 105 | get_current_crtc_for_encoder(struct drm_device *dev, |
85 | struct drm_encoder *encoder) | 106 | struct drm_encoder *encoder) |
@@ -229,6 +250,13 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) | |||
229 | return 0; | 250 | return 0; |
230 | } | 251 | } |
231 | 252 | ||
253 | if (!check_pending_encoder_assignment(state, new_encoder, connector)) { | ||
254 | DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n", | ||
255 | connector->base.id, | ||
256 | connector->name); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | |||
232 | encoder_crtc = get_current_crtc_for_encoder(state->dev, | 260 | encoder_crtc = get_current_crtc_for_encoder(state->dev, |
233 | new_encoder); | 261 | new_encoder); |
234 | 262 | ||
@@ -1342,6 +1370,49 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) | |||
1342 | EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); | 1370 | EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); |
1343 | 1371 | ||
1344 | /** | 1372 | /** |
1373 | * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes | ||
1374 | * @crtc: CRTC | ||
1375 | * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks | ||
1376 | * | ||
1377 | * Disables all planes associated with the given CRTC. This can be | ||
1378 | * used for instance in the CRTC helper disable callback to disable | ||
1379 | * all planes before shutting down the display pipeline. | ||
1380 | * | ||
1381 | * If the atomic-parameter is set the function calls the CRTC's | ||
1382 | * atomic_begin hook before and atomic_flush hook after disabling the | ||
1383 | * planes. | ||
1384 | * | ||
1385 | * It is a bug to call this function without having implemented the | ||
1386 | * ->atomic_disable() plane hook. | ||
1387 | */ | ||
1388 | void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, | ||
1389 | bool atomic) | ||
1390 | { | ||
1391 | const struct drm_crtc_helper_funcs *crtc_funcs = | ||
1392 | crtc->helper_private; | ||
1393 | struct drm_plane *plane; | ||
1394 | |||
1395 | if (atomic && crtc_funcs && crtc_funcs->atomic_begin) | ||
1396 | crtc_funcs->atomic_begin(crtc, NULL); | ||
1397 | |||
1398 | drm_for_each_plane(plane, crtc->dev) { | ||
1399 | const struct drm_plane_helper_funcs *plane_funcs = | ||
1400 | plane->helper_private; | ||
1401 | |||
1402 | if (plane->state->crtc != crtc || !plane_funcs) | ||
1403 | continue; | ||
1404 | |||
1405 | WARN_ON(!plane_funcs->atomic_disable); | ||
1406 | if (plane_funcs->atomic_disable) | ||
1407 | plane_funcs->atomic_disable(plane, NULL); | ||
1408 | } | ||
1409 | |||
1410 | if (atomic && crtc_funcs && crtc_funcs->atomic_flush) | ||
1411 | crtc_funcs->atomic_flush(crtc, NULL); | ||
1412 | } | ||
1413 | EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc); | ||
1414 | |||
1415 | /** | ||
1345 | * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit | 1416 | * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit |
1346 | * @dev: DRM device | 1417 | * @dev: DRM device |
1347 | * @old_state: atomic state object with old state structures | 1418 | * @old_state: atomic state object with old state structures |
@@ -1818,6 +1889,161 @@ commit: | |||
1818 | } | 1889 | } |
1819 | 1890 | ||
1820 | /** | 1891 | /** |
1892 | * drm_atomic_helper_disable_all - disable all currently active outputs | ||
1893 | * @dev: DRM device | ||
1894 | * @ctx: lock acquisition context | ||
1895 | * | ||
1896 | * Loops through all connectors, finding those that aren't turned off and then | ||
1897 | * turns them off by setting their DPMS mode to OFF and deactivating the CRTC | ||
1898 | * that they are connected to. | ||
1899 | * | ||
1900 | * This is used for example in suspend/resume to disable all currently active | ||
1901 | * functions when suspending. | ||
1902 | * | ||
1903 | * Note that if callers haven't already acquired all modeset locks this might | ||
1904 | * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). | ||
1905 | * | ||
1906 | * Returns: | ||
1907 | * 0 on success or a negative error code on failure. | ||
1908 | * | ||
1909 | * See also: | ||
1910 | * drm_atomic_helper_suspend(), drm_atomic_helper_resume() | ||
1911 | */ | ||
1912 | int drm_atomic_helper_disable_all(struct drm_device *dev, | ||
1913 | struct drm_modeset_acquire_ctx *ctx) | ||
1914 | { | ||
1915 | struct drm_atomic_state *state; | ||
1916 | struct drm_connector *conn; | ||
1917 | int err; | ||
1918 | |||
1919 | state = drm_atomic_state_alloc(dev); | ||
1920 | if (!state) | ||
1921 | return -ENOMEM; | ||
1922 | |||
1923 | state->acquire_ctx = ctx; | ||
1924 | |||
1925 | drm_for_each_connector(conn, dev) { | ||
1926 | struct drm_crtc *crtc = conn->state->crtc; | ||
1927 | struct drm_crtc_state *crtc_state; | ||
1928 | |||
1929 | if (!crtc || conn->dpms != DRM_MODE_DPMS_ON) | ||
1930 | continue; | ||
1931 | |||
1932 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | ||
1933 | if (IS_ERR(crtc_state)) { | ||
1934 | err = PTR_ERR(crtc_state); | ||
1935 | goto free; | ||
1936 | } | ||
1937 | |||
1938 | crtc_state->active = false; | ||
1939 | } | ||
1940 | |||
1941 | err = drm_atomic_commit(state); | ||
1942 | |||
1943 | free: | ||
1944 | if (err < 0) | ||
1945 | drm_atomic_state_free(state); | ||
1946 | |||
1947 | return err; | ||
1948 | } | ||
1949 | EXPORT_SYMBOL(drm_atomic_helper_disable_all); | ||
1950 | |||
1951 | /** | ||
1952 | * drm_atomic_helper_suspend - subsystem-level suspend helper | ||
1953 | * @dev: DRM device | ||
1954 | * | ||
1955 | * Duplicates the current atomic state, disables all active outputs and then | ||
1956 | * returns a pointer to the original atomic state to the caller. Drivers can | ||
1957 | * pass this pointer to the drm_atomic_helper_resume() helper upon resume to | ||
1958 | * restore the output configuration that was active at the time the system | ||
1959 | * entered suspend. | ||
1960 | * | ||
1961 | * Note that it is potentially unsafe to use this. The atomic state object | ||
1962 | * returned by this function is assumed to be persistent. Drivers must ensure | ||
1963 | * that this holds true. Before calling this function, drivers must make sure | ||
1964 | * to suspend fbdev emulation so that nothing can be using the device. | ||
1965 | * | ||
1966 | * Returns: | ||
1967 | * A pointer to a copy of the state before suspend on success or an ERR_PTR()- | ||
1968 | * encoded error code on failure. Drivers should store the returned atomic | ||
1969 | * state object and pass it to the drm_atomic_helper_resume() helper upon | ||
1970 | * resume. | ||
1971 | * | ||
1972 | * See also: | ||
1973 | * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(), | ||
1974 | * drm_atomic_helper_resume() | ||
1975 | */ | ||
1976 | struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) | ||
1977 | { | ||
1978 | struct drm_modeset_acquire_ctx ctx; | ||
1979 | struct drm_atomic_state *state; | ||
1980 | int err; | ||
1981 | |||
1982 | drm_modeset_acquire_init(&ctx, 0); | ||
1983 | |||
1984 | retry: | ||
1985 | err = drm_modeset_lock_all_ctx(dev, &ctx); | ||
1986 | if (err < 0) { | ||
1987 | state = ERR_PTR(err); | ||
1988 | goto unlock; | ||
1989 | } | ||
1990 | |||
1991 | state = drm_atomic_helper_duplicate_state(dev, &ctx); | ||
1992 | if (IS_ERR(state)) | ||
1993 | goto unlock; | ||
1994 | |||
1995 | err = drm_atomic_helper_disable_all(dev, &ctx); | ||
1996 | if (err < 0) { | ||
1997 | drm_atomic_state_free(state); | ||
1998 | state = ERR_PTR(err); | ||
1999 | goto unlock; | ||
2000 | } | ||
2001 | |||
2002 | unlock: | ||
2003 | if (PTR_ERR(state) == -EDEADLK) { | ||
2004 | drm_modeset_backoff(&ctx); | ||
2005 | goto retry; | ||
2006 | } | ||
2007 | |||
2008 | drm_modeset_drop_locks(&ctx); | ||
2009 | drm_modeset_acquire_fini(&ctx); | ||
2010 | return state; | ||
2011 | } | ||
2012 | EXPORT_SYMBOL(drm_atomic_helper_suspend); | ||
2013 | |||
2014 | /** | ||
2015 | * drm_atomic_helper_resume - subsystem-level resume helper | ||
2016 | * @dev: DRM device | ||
2017 | * @state: atomic state to resume to | ||
2018 | * | ||
2019 | * Calls drm_mode_config_reset() to synchronize hardware and software states, | ||
2020 | * grabs all modeset locks and commits the atomic state object. This can be | ||
2021 | * used in conjunction with the drm_atomic_helper_suspend() helper to | ||
2022 | * implement suspend/resume for drivers that support atomic mode-setting. | ||
2023 | * | ||
2024 | * Returns: | ||
2025 | * 0 on success or a negative error code on failure. | ||
2026 | * | ||
2027 | * See also: | ||
2028 | * drm_atomic_helper_suspend() | ||
2029 | */ | ||
2030 | int drm_atomic_helper_resume(struct drm_device *dev, | ||
2031 | struct drm_atomic_state *state) | ||
2032 | { | ||
2033 | struct drm_mode_config *config = &dev->mode_config; | ||
2034 | int err; | ||
2035 | |||
2036 | drm_mode_config_reset(dev); | ||
2037 | drm_modeset_lock_all(dev); | ||
2038 | state->acquire_ctx = config->acquire_ctx; | ||
2039 | err = drm_atomic_commit(state); | ||
2040 | drm_modeset_unlock_all(dev); | ||
2041 | |||
2042 | return err; | ||
2043 | } | ||
2044 | EXPORT_SYMBOL(drm_atomic_helper_resume); | ||
2045 | |||
2046 | /** | ||
1821 | * drm_atomic_helper_crtc_set_property - helper for crtc properties | 2047 | * drm_atomic_helper_crtc_set_property - helper for crtc properties |
1822 | * @crtc: DRM crtc | 2048 | * @crtc: DRM crtc |
1823 | * @property: DRM property | 2049 | * @property: DRM property |
@@ -2429,7 +2655,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); | |||
2429 | * @ctx: lock acquisition context | 2655 | * @ctx: lock acquisition context |
2430 | * | 2656 | * |
2431 | * Makes a copy of the current atomic state by looping over all objects and | 2657 | * Makes a copy of the current atomic state by looping over all objects and |
2432 | * duplicating their respective states. | 2658 | * duplicating their respective states. This is used for example by suspend/ |
2659 | * resume support code to save the state prior to suspend such that it can | ||
2660 | * be restored upon resume. | ||
2433 | * | 2661 | * |
2434 | * Note that this treats atomic state as persistent between save and restore. | 2662 | * Note that this treats atomic state as persistent between save and restore. |
2435 | * Drivers must make sure that this is possible and won't result in confusion | 2663 | * Drivers must make sure that this is possible and won't result in confusion |
@@ -2441,6 +2669,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); | |||
2441 | * Returns: | 2669 | * Returns: |
2442 | * A pointer to the copy of the atomic state object on success or an | 2670 | * A pointer to the copy of the atomic state object on success or an |
2443 | * ERR_PTR()-encoded error code on failure. | 2671 | * ERR_PTR()-encoded error code on failure. |
2672 | * | ||
2673 | * See also: | ||
2674 | * drm_atomic_helper_suspend(), drm_atomic_helper_resume() | ||
2444 | */ | 2675 | */ |
2445 | struct drm_atomic_state * | 2676 | struct drm_atomic_state * |
2446 | drm_atomic_helper_duplicate_state(struct drm_device *dev, | 2677 | drm_atomic_helper_duplicate_state(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6b4cf25fed12..10d0989db273 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -855,6 +855,12 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); | |||
855 | * due to slight differences in allocating shared resources when the | 855 | * due to slight differences in allocating shared resources when the |
856 | * configuration is restored in a different order than when userspace set it up) | 856 | * configuration is restored in a different order than when userspace set it up) |
857 | * need to use their own restore logic. | 857 | * need to use their own restore logic. |
858 | * | ||
859 | * This function is deprecated. New drivers should implement atomic mode- | ||
860 | * setting and use the atomic suspend/resume helpers. | ||
861 | * | ||
862 | * See also: | ||
863 | * drm_atomic_helper_suspend(), drm_atomic_helper_resume() | ||
858 | */ | 864 | */ |
859 | void drm_helper_resume_force_mode(struct drm_device *dev) | 865 | void drm_helper_resume_force_mode(struct drm_device *dev) |
860 | { | 866 | { |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index d5d2c03fd136..c214f1246cb4 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2545,6 +2545,33 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode) | |||
2545 | return clock; | 2545 | return clock; |
2546 | } | 2546 | } |
2547 | 2547 | ||
2548 | static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, | ||
2549 | unsigned int clock_tolerance) | ||
2550 | { | ||
2551 | u8 mode; | ||
2552 | |||
2553 | if (!to_match->clock) | ||
2554 | return 0; | ||
2555 | |||
2556 | for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { | ||
2557 | const struct drm_display_mode *cea_mode = &edid_cea_modes[mode]; | ||
2558 | unsigned int clock1, clock2; | ||
2559 | |||
2560 | /* Check both 60Hz and 59.94Hz */ | ||
2561 | clock1 = cea_mode->clock; | ||
2562 | clock2 = cea_mode_alternate_clock(cea_mode); | ||
2563 | |||
2564 | if (abs(to_match->clock - clock1) > clock_tolerance && | ||
2565 | abs(to_match->clock - clock2) > clock_tolerance) | ||
2566 | continue; | ||
2567 | |||
2568 | if (drm_mode_equal_no_clocks(to_match, cea_mode)) | ||
2569 | return mode + 1; | ||
2570 | } | ||
2571 | |||
2572 | return 0; | ||
2573 | } | ||
2574 | |||
2548 | /** | 2575 | /** |
2549 | * drm_match_cea_mode - look for a CEA mode matching given mode | 2576 | * drm_match_cea_mode - look for a CEA mode matching given mode |
2550 | * @to_match: display mode | 2577 | * @to_match: display mode |
@@ -2609,6 +2636,33 @@ hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode) | |||
2609 | return cea_mode_alternate_clock(hdmi_mode); | 2636 | return cea_mode_alternate_clock(hdmi_mode); |
2610 | } | 2637 | } |
2611 | 2638 | ||
2639 | static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, | ||
2640 | unsigned int clock_tolerance) | ||
2641 | { | ||
2642 | u8 mode; | ||
2643 | |||
2644 | if (!to_match->clock) | ||
2645 | return 0; | ||
2646 | |||
2647 | for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) { | ||
2648 | const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode]; | ||
2649 | unsigned int clock1, clock2; | ||
2650 | |||
2651 | /* Make sure to also match alternate clocks */ | ||
2652 | clock1 = hdmi_mode->clock; | ||
2653 | clock2 = hdmi_mode_alternate_clock(hdmi_mode); | ||
2654 | |||
2655 | if (abs(to_match->clock - clock1) > clock_tolerance && | ||
2656 | abs(to_match->clock - clock2) > clock_tolerance) | ||
2657 | continue; | ||
2658 | |||
2659 | if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) | ||
2660 | return mode + 1; | ||
2661 | } | ||
2662 | |||
2663 | return 0; | ||
2664 | } | ||
2665 | |||
2612 | /* | 2666 | /* |
2613 | * drm_match_hdmi_mode - look for a HDMI mode matching given mode | 2667 | * drm_match_hdmi_mode - look for a HDMI mode matching given mode |
2614 | * @to_match: display mode | 2668 | * @to_match: display mode |
@@ -3119,14 +3173,18 @@ static void fixup_detailed_cea_mode_clock(struct drm_display_mode *mode) | |||
3119 | u8 mode_idx; | 3173 | u8 mode_idx; |
3120 | const char *type; | 3174 | const char *type; |
3121 | 3175 | ||
3122 | mode_idx = drm_match_cea_mode(mode) - 1; | 3176 | /* |
3177 | * allow 5kHz clock difference either way to account for | ||
3178 | * the 10kHz clock resolution limit of detailed timings. | ||
3179 | */ | ||
3180 | mode_idx = drm_match_cea_mode_clock_tolerance(mode, 5) - 1; | ||
3123 | if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { | 3181 | if (mode_idx < ARRAY_SIZE(edid_cea_modes)) { |
3124 | type = "CEA"; | 3182 | type = "CEA"; |
3125 | cea_mode = &edid_cea_modes[mode_idx]; | 3183 | cea_mode = &edid_cea_modes[mode_idx]; |
3126 | clock1 = cea_mode->clock; | 3184 | clock1 = cea_mode->clock; |
3127 | clock2 = cea_mode_alternate_clock(cea_mode); | 3185 | clock2 = cea_mode_alternate_clock(cea_mode); |
3128 | } else { | 3186 | } else { |
3129 | mode_idx = drm_match_hdmi_mode(mode) - 1; | 3187 | mode_idx = drm_match_hdmi_mode_clock_tolerance(mode, 5) - 1; |
3130 | if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { | 3188 | if (mode_idx < ARRAY_SIZE(edid_4k_modes)) { |
3131 | type = "HDMI"; | 3189 | type = "HDMI"; |
3132 | cea_mode = &edid_4k_modes[mode_idx]; | 3190 | cea_mode = &edid_4k_modes[mode_idx]; |
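
To put the 5 kHz tolerance in perspective: CEA mode 16 (1920x1080@60) has a nominal pixel clock of 148500 kHz, and its 59.94 Hz variant computed by cea_mode_alternate_clock() is DIV_ROUND_CLOSEST(148500 * 1000, 1001) = 148352 kHz. A detailed timing descriptor stores the clock with only 10 kHz resolution, so the closest value it can carry is 148350 kHz; that 2 kHz gap made the exact-match drm_match_cea_mode() lookup fail before, but falls comfortably inside the new 5 kHz window.
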
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index c59ce4d0ef75..81df9ae95e2e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -172,6 +172,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) | |||
172 | init_waitqueue_head(&priv->event_wait); | 172 | init_waitqueue_head(&priv->event_wait); |
173 | priv->event_space = 4096; /* set aside 4k for event buffer */ | 173 | priv->event_space = 4096; /* set aside 4k for event buffer */ |
174 | 174 | ||
175 | mutex_init(&priv->event_read_lock); | ||
176 | |||
175 | if (drm_core_check_feature(dev, DRIVER_GEM)) | 177 | if (drm_core_check_feature(dev, DRIVER_GEM)) |
176 | drm_gem_open(dev, priv); | 178 | drm_gem_open(dev, priv); |
177 | 179 | ||
@@ -483,14 +485,28 @@ ssize_t drm_read(struct file *filp, char __user *buffer, | |||
483 | { | 485 | { |
484 | struct drm_file *file_priv = filp->private_data; | 486 | struct drm_file *file_priv = filp->private_data; |
485 | struct drm_device *dev = file_priv->minor->dev; | 487 | struct drm_device *dev = file_priv->minor->dev; |
486 | ssize_t ret = 0; | 488 | ssize_t ret; |
487 | 489 | ||
488 | if (!access_ok(VERIFY_WRITE, buffer, count)) | 490 | if (!access_ok(VERIFY_WRITE, buffer, count)) |
489 | return -EFAULT; | 491 | return -EFAULT; |
490 | 492 | ||
491 | spin_lock_irq(&dev->event_lock); | 493 | ret = mutex_lock_interruptible(&file_priv->event_read_lock); |
494 | if (ret) | ||
495 | return ret; | ||
496 | |||
492 | for (;;) { | 497 | for (;;) { |
493 | if (list_empty(&file_priv->event_list)) { | 498 | struct drm_pending_event *e = NULL; |
499 | |||
500 | spin_lock_irq(&dev->event_lock); | ||
501 | if (!list_empty(&file_priv->event_list)) { | ||
502 | e = list_first_entry(&file_priv->event_list, | ||
503 | struct drm_pending_event, link); | ||
504 | file_priv->event_space += e->event->length; | ||
505 | list_del(&e->link); | ||
506 | } | ||
507 | spin_unlock_irq(&dev->event_lock); | ||
508 | |||
509 | if (e == NULL) { | ||
494 | if (ret) | 510 | if (ret) |
495 | break; | 511 | break; |
496 | 512 | ||
@@ -499,36 +515,36 @@ ssize_t drm_read(struct file *filp, char __user *buffer, | |||
499 | break; | 515 | break; |
500 | } | 516 | } |
501 | 517 | ||
502 | spin_unlock_irq(&dev->event_lock); | 518 | mutex_unlock(&file_priv->event_read_lock); |
503 | ret = wait_event_interruptible(file_priv->event_wait, | 519 | ret = wait_event_interruptible(file_priv->event_wait, |
504 | !list_empty(&file_priv->event_list)); | 520 | !list_empty(&file_priv->event_list)); |
505 | spin_lock_irq(&dev->event_lock); | 521 | if (ret >= 0) |
506 | if (ret < 0) | 522 | ret = mutex_lock_interruptible(&file_priv->event_read_lock); |
507 | break; | 523 | if (ret) |
508 | 524 | return ret; | |
509 | ret = 0; | ||
510 | } else { | 525 | } else { |
511 | struct drm_pending_event *e; | 526 | unsigned length = e->event->length; |
512 | 527 | ||
513 | e = list_first_entry(&file_priv->event_list, | 528 | if (length > count - ret) { |
514 | struct drm_pending_event, link); | 529 | put_back_event: |
515 | if (e->event->length + ret > count) | 530 | spin_lock_irq(&dev->event_lock); |
531 | file_priv->event_space -= length; | ||
532 | list_add(&e->link, &file_priv->event_list); | ||
533 | spin_unlock_irq(&dev->event_lock); | ||
516 | break; | 534 | break; |
535 | } | ||
517 | 536 | ||
518 | if (__copy_to_user_inatomic(buffer + ret, | 537 | if (copy_to_user(buffer + ret, e->event, length)) { |
519 | e->event, e->event->length)) { | ||
520 | if (ret == 0) | 538 | if (ret == 0) |
521 | ret = -EFAULT; | 539 | ret = -EFAULT; |
522 | break; | 540 | goto put_back_event; |
523 | } | 541 | } |
524 | 542 | ||
525 | file_priv->event_space += e->event->length; | 543 | ret += length; |
526 | ret += e->event->length; | ||
527 | list_del(&e->link); | ||
528 | e->destroy(e); | 544 | e->destroy(e); |
529 | } | 545 | } |
530 | } | 546 | } |
531 | spin_unlock_irq(&dev->event_lock); | 547 | mutex_unlock(&file_priv->event_read_lock); |
532 | 548 | ||
533 | return ret; | 549 | return ret; |
534 | } | 550 | } |
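
The reworked drm_read() above still only ever hands userspace whole events: an event that would not fit in the remaining buffer space is put back on the queue instead of being truncated. For context, a rough sketch of the userspace consumer this contract serves — the function name, buffer size and header path are assumptions:

#include <unistd.h>
#include <drm.h>	/* struct drm_event, from libdrm's include path */

/* Drain whatever complete events are currently queued on a DRM fd. */
static void foo_drain_drm_events(int drm_fd)
{
	char buf[4096];
	ssize_t len, off = 0;

	len = read(drm_fd, buf, sizeof(buf));
	if (len <= 0)
		return;

	while (off + (ssize_t)sizeof(struct drm_event) <= len) {
		const struct drm_event *e = (const void *)&buf[off];

		if (e->length == 0)
			break;	/* defensive: never expected from the kernel */

		if (e->type == DRM_EVENT_FLIP_COMPLETE) {
			/* handle the page-flip completion here */
		}

		off += e->length;
	}
}
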
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index bde9b2911dc2..ef6bd3656548 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -917,13 +917,30 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ | |||
917 | } else if (mode1->clock != mode2->clock) | 917 | } else if (mode1->clock != mode2->clock) |
918 | return false; | 918 | return false; |
919 | 919 | ||
920 | return drm_mode_equal_no_clocks(mode1, mode2); | ||
921 | } | ||
922 | EXPORT_SYMBOL(drm_mode_equal); | ||
923 | |||
924 | /** | ||
925 | * drm_mode_equal_no_clocks - test modes for equality | ||
926 | * @mode1: first mode | ||
927 | * @mode2: second mode | ||
928 | * | ||
929 | * Check to see if @mode1 and @mode2 are equivalent, but | ||
930 | * don't check the pixel clocks. | ||
931 | * | ||
932 | * Returns: | ||
933 | * True if the modes are equal, false otherwise. | ||
934 | */ | ||
935 | bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) | ||
936 | { | ||
920 | if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != | 937 | if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != |
921 | (mode2->flags & DRM_MODE_FLAG_3D_MASK)) | 938 | (mode2->flags & DRM_MODE_FLAG_3D_MASK)) |
922 | return false; | 939 | return false; |
923 | 940 | ||
924 | return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); | 941 | return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); |
925 | } | 942 | } |
926 | EXPORT_SYMBOL(drm_mode_equal); | 943 | EXPORT_SYMBOL(drm_mode_equal_no_clocks); |
927 | 944 | ||
928 | /** | 945 | /** |
929 | * drm_mode_equal_no_clocks_no_stereo - test modes for equality | 946 | * drm_mode_equal_no_clocks_no_stereo - test modes for equality |
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 6675b1428410..c2f5971146ba 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -57,11 +57,18 @@ | |||
57 | 57 | ||
58 | /** | 58 | /** |
59 | * drm_modeset_lock_all - take all modeset locks | 59 | * drm_modeset_lock_all - take all modeset locks |
60 | * @dev: drm device | 60 | * @dev: DRM device |
61 | * | 61 | * |
62 | * This function takes all modeset locks, suitable where a more fine-grained | 62 | * This function takes all modeset locks, suitable where a more fine-grained |
63 | * scheme isn't (yet) implemented. Locks must be dropped with | 63 | * scheme isn't (yet) implemented. Locks must be dropped by calling the |
64 | * drm_modeset_unlock_all. | 64 | * drm_modeset_unlock_all() function. |
65 | * | ||
66 | * This function is deprecated. It allocates a lock acquisition context and | ||
67 | * stores it in the DRM device's ->mode_config. This facilitate conversion of | ||
68 | * existing code because it removes the need to manually deal with the | ||
69 | * acquisition context, but it is also brittle because the context is global | ||
70 | * and care must be taken not to nest calls. New code should use the | ||
71 | * drm_modeset_lock_all_ctx() function and pass in the context explicitly. | ||
65 | */ | 72 | */ |
66 | void drm_modeset_lock_all(struct drm_device *dev) | 73 | void drm_modeset_lock_all(struct drm_device *dev) |
67 | { | 74 | { |
@@ -78,39 +85,43 @@ void drm_modeset_lock_all(struct drm_device *dev) | |||
78 | drm_modeset_acquire_init(ctx, 0); | 85 | drm_modeset_acquire_init(ctx, 0); |
79 | 86 | ||
80 | retry: | 87 | retry: |
81 | ret = drm_modeset_lock(&config->connection_mutex, ctx); | 88 | ret = drm_modeset_lock_all_ctx(dev, ctx); |
82 | if (ret) | 89 | if (ret < 0) { |
83 | goto fail; | 90 | if (ret == -EDEADLK) { |
84 | ret = drm_modeset_lock_all_crtcs(dev, ctx); | 91 | drm_modeset_backoff(ctx); |
85 | if (ret) | 92 | goto retry; |
86 | goto fail; | 93 | } |
94 | |||
95 | drm_modeset_acquire_fini(ctx); | ||
96 | kfree(ctx); | ||
97 | return; | ||
98 | } | ||
87 | 99 | ||
88 | WARN_ON(config->acquire_ctx); | 100 | WARN_ON(config->acquire_ctx); |
89 | 101 | ||
90 | /* now we hold the locks, so now that it is safe, stash the | 102 | /* |
91 | * ctx for drm_modeset_unlock_all(): | 103 | * We hold the locks now, so it is safe to stash the acquisition |
104 | * context for drm_modeset_unlock_all(). | ||
92 | */ | 105 | */ |
93 | config->acquire_ctx = ctx; | 106 | config->acquire_ctx = ctx; |
94 | 107 | ||
95 | drm_warn_on_modeset_not_all_locked(dev); | 108 | drm_warn_on_modeset_not_all_locked(dev); |
96 | |||
97 | return; | ||
98 | |||
99 | fail: | ||
100 | if (ret == -EDEADLK) { | ||
101 | drm_modeset_backoff(ctx); | ||
102 | goto retry; | ||
103 | } | ||
104 | |||
105 | kfree(ctx); | ||
106 | } | 109 | } |
107 | EXPORT_SYMBOL(drm_modeset_lock_all); | 110 | EXPORT_SYMBOL(drm_modeset_lock_all); |
108 | 111 | ||
109 | /** | 112 | /** |
110 | * drm_modeset_unlock_all - drop all modeset locks | 113 | * drm_modeset_unlock_all - drop all modeset locks |
111 | * @dev: device | 114 | * @dev: DRM device |
112 | * | 115 | * |
113 | * This function drop all modeset locks taken by drm_modeset_lock_all. | 116 | * This function drops all modeset locks taken by a previous call to the |
117 | * drm_modeset_lock_all() function. | ||
118 | * | ||
119 | * This function is deprecated. It uses the lock acquisition context stored | ||
120 | * in the DRM device's ->mode_config. This facilitates conversion of existing | ||
121 | * code because it removes the need to manually deal with the acquisition | ||
122 | * context, but it is also brittle because the context is global and care must | ||
123 | * be taken not to nest calls. New code should pass the acquisition context | ||
124 | * directly to the drm_modeset_drop_locks() function. | ||
114 | */ | 125 | */ |
115 | void drm_modeset_unlock_all(struct drm_device *dev) | 126 | void drm_modeset_unlock_all(struct drm_device *dev) |
116 | { | 127 | { |
@@ -431,14 +442,34 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock) | |||
431 | } | 442 | } |
432 | EXPORT_SYMBOL(drm_modeset_unlock); | 443 | EXPORT_SYMBOL(drm_modeset_unlock); |
433 | 444 | ||
434 | /* In some legacy codepaths it's convenient to just grab all the crtc and plane | 445 | /** |
435 | * related locks. */ | 446 | * drm_modeset_lock_all_ctx - take all modeset locks |
436 | int drm_modeset_lock_all_crtcs(struct drm_device *dev, | 447 | * @dev: DRM device |
437 | struct drm_modeset_acquire_ctx *ctx) | 448 | * @ctx: lock acquisition context |
449 | * | ||
450 | * This function takes all modeset locks, suitable where a more fine-grained | ||
451 | * scheme isn't (yet) implemented. | ||
452 | * | ||
453 | * Unlike drm_modeset_lock_all(), it doesn't take the dev->mode_config.mutex | ||
454 | * since that lock isn't required for modeset state changes. Callers which | ||
455 | * need to grab that lock too need to do so outside of the acquire context | ||
456 | * @ctx. | ||
457 | * | ||
458 | * Locks acquired with this function should be released by calling the | ||
459 | * drm_modeset_drop_locks() function on @ctx. | ||
460 | * | ||
461 | * Returns: 0 on success or a negative error-code on failure. | ||
462 | */ | ||
463 | int drm_modeset_lock_all_ctx(struct drm_device *dev, | ||
464 | struct drm_modeset_acquire_ctx *ctx) | ||
438 | { | 465 | { |
439 | struct drm_crtc *crtc; | 466 | struct drm_crtc *crtc; |
440 | struct drm_plane *plane; | 467 | struct drm_plane *plane; |
441 | int ret = 0; | 468 | int ret; |
469 | |||
470 | ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); | ||
471 | if (ret) | ||
472 | return ret; | ||
442 | 473 | ||
443 | drm_for_each_crtc(crtc, dev) { | 474 | drm_for_each_crtc(crtc, dev) { |
444 | ret = drm_modeset_lock(&crtc->mutex, ctx); | 475 | ret = drm_modeset_lock(&crtc->mutex, ctx); |
@@ -454,4 +485,4 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev, | |||
454 | 485 | ||
455 | return 0; | 486 | return 0; |
456 | } | 487 | } |
457 | EXPORT_SYMBOL(drm_modeset_lock_all_crtcs); | 488 | EXPORT_SYMBOL(drm_modeset_lock_all_ctx); |
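
The rewritten drm_modeset_lock_all() above doubles as a template for drivers moving to the explicit acquire context. A minimal sketch of that pattern around the new drm_modeset_lock_all_ctx(), assuming a hypothetical foo_with_all_locks() caller:

#include <drm/drmP.h>
#include <drm/drm_modeset_lock.h>

static int foo_with_all_locks(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* lost a lock-ordering race: drop everything and try again */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (!ret) {
		/* ... inspect or update modeset state under the locks ... */
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}

Unlike the deprecated helper, nothing here is stashed in dev->mode_config, so nested or concurrent users each keep their own context.
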
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 94ba39e34299..eee3b6f38cfb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -168,10 +168,11 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
168 | * check here, and if anything changed start the hotplug code. | 168 | * check here, and if anything changed start the hotplug code. |
169 | */ | 169 | */ |
170 | if (old_status != connector->status) { | 170 | if (old_status != connector->status) { |
171 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", | 171 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", |
172 | connector->base.id, | 172 | connector->base.id, |
173 | connector->name, | 173 | connector->name, |
174 | old_status, connector->status); | 174 | drm_get_connector_status_name(old_status), |
175 | drm_get_connector_status_name(connector->status)); | ||
175 | 176 | ||
176 | /* | 177 | /* |
177 | * The hotplug event code might call into the fb | 178 | * The hotplug event code might call into the fb |
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index dc0508dca1d4..ee95c03a8c54 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -406,8 +406,6 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
406 | 406 | ||
407 | memset(dev_priv->vram_addr + backing->offset, 0, size); | 407 | memset(dev_priv->vram_addr + backing->offset, 0, size); |
408 | 408 | ||
409 | mutex_lock(&dev->struct_mutex); | ||
410 | |||
411 | info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); | 409 | info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper); |
412 | if (IS_ERR(info)) { | 410 | if (IS_ERR(info)) { |
413 | ret = PTR_ERR(info); | 411 | ret = PTR_ERR(info); |
@@ -463,17 +461,15 @@ static int psbfb_create(struct psb_fbdev *fbdev, | |||
463 | dev_dbg(dev->dev, "allocated %dx%d fb\n", | 461 | dev_dbg(dev->dev, "allocated %dx%d fb\n", |
464 | psbfb->base.width, psbfb->base.height); | 462 | psbfb->base.width, psbfb->base.height); |
465 | 463 | ||
466 | mutex_unlock(&dev->struct_mutex); | ||
467 | return 0; | 464 | return 0; |
468 | out_unref: | 465 | out_unref: |
469 | if (backing->stolen) | 466 | if (backing->stolen) |
470 | psb_gtt_free_range(dev, backing); | 467 | psb_gtt_free_range(dev, backing); |
471 | else | 468 | else |
472 | drm_gem_object_unreference(&backing->gem); | 469 | drm_gem_object_unreference_unlocked(&backing->gem); |
473 | 470 | ||
474 | drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); | 471 | drm_fb_helper_release_fbi(&fbdev->psb_fb_helper); |
475 | out_err1: | 472 | out_err1: |
476 | mutex_unlock(&dev->struct_mutex); | ||
477 | psb_gtt_free_range(dev, backing); | 473 | psb_gtt_free_range(dev, backing); |
478 | return ret; | 474 | return ret; |
479 | } | 475 | } |
@@ -569,7 +565,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev) | |||
569 | drm_framebuffer_cleanup(&psbfb->base); | 565 | drm_framebuffer_cleanup(&psbfb->base); |
570 | 566 | ||
571 | if (psbfb->gtt) | 567 | if (psbfb->gtt) |
572 | drm_gem_object_unreference(&psbfb->gtt->gem); | 568 | drm_gem_object_unreference_unlocked(&psbfb->gtt->gem); |
573 | return 0; | 569 | return 0; |
574 | } | 570 | } |
575 | 571 | ||
@@ -784,12 +780,8 @@ void psb_modeset_cleanup(struct drm_device *dev) | |||
784 | { | 780 | { |
785 | struct drm_psb_private *dev_priv = dev->dev_private; | 781 | struct drm_psb_private *dev_priv = dev->dev_private; |
786 | if (dev_priv->modeset) { | 782 | if (dev_priv->modeset) { |
787 | mutex_lock(&dev->struct_mutex); | ||
788 | |||
789 | drm_kms_helper_poll_fini(dev); | 783 | drm_kms_helper_poll_fini(dev); |
790 | psb_fbdev_fini(dev); | 784 | psb_fbdev_fini(dev); |
791 | drm_mode_config_cleanup(dev); | 785 | drm_mode_config_cleanup(dev); |
792 | |||
793 | mutex_unlock(&dev->struct_mutex); | ||
794 | } | 786 | } |
795 | } | 787 | } |
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index c707fa6fca85..506224b3a0ad 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -62,15 +62,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, | |||
62 | int ret = 0; | 62 | int ret = 0; |
63 | struct drm_gem_object *obj; | 63 | struct drm_gem_object *obj; |
64 | 64 | ||
65 | mutex_lock(&dev->struct_mutex); | ||
66 | |||
67 | /* GEM does all our handle to object mapping */ | 65 | /* GEM does all our handle to object mapping */ |
68 | obj = drm_gem_object_lookup(dev, file, handle); | 66 | obj = drm_gem_object_lookup(dev, file, handle); |
69 | if (obj == NULL) { | 67 | if (obj == NULL) |
70 | ret = -ENOENT; | 68 | return -ENOENT; |
71 | goto unlock; | ||
72 | } | ||
73 | /* What validation is needed here ? */ | ||
74 | 69 | ||
75 | /* Make it mmapable */ | 70 | /* Make it mmapable */ |
76 | ret = drm_gem_create_mmap_offset(obj); | 71 | ret = drm_gem_create_mmap_offset(obj); |
@@ -78,9 +73,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev, | |||
78 | goto out; | 73 | goto out; |
79 | *offset = drm_vma_node_offset_addr(&obj->vma_node); | 74 | *offset = drm_vma_node_offset_addr(&obj->vma_node); |
80 | out: | 75 | out: |
81 | drm_gem_object_unreference(obj); | 76 | drm_gem_object_unreference_unlocked(obj); |
82 | unlock: | ||
83 | mutex_unlock(&dev->struct_mutex); | ||
84 | return ret; | 77 | return ret; |
85 | } | 78 | } |
86 | 79 | ||
@@ -130,7 +123,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, | |||
130 | return ret; | 123 | return ret; |
131 | } | 124 | } |
132 | /* We have the initial and handle reference but need only one now */ | 125 | /* We have the initial and handle reference but need only one now */ |
133 | drm_gem_object_unreference(&r->gem); | 126 | drm_gem_object_unreference_unlocked(&r->gem); |
134 | *handlep = handle; | 127 | *handlep = handle; |
135 | return 0; | 128 | return 0; |
136 | } | 129 | } |
@@ -189,7 +182,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
189 | 182 | ||
190 | /* Make sure we don't parallel update on a fault, nor move or remove | 183 | /* Make sure we don't parallel update on a fault, nor move or remove |
191 | something from beneath our feet */ | 184 | something from beneath our feet */ |
192 | mutex_lock(&dev->struct_mutex); | 185 | mutex_lock(&dev_priv->mmap_mutex); |
193 | 186 | ||
194 | /* For now the mmap pins the object and it stays pinned. As things | 187 | /* For now the mmap pins the object and it stays pinned. As things |
195 | stand that will do us no harm */ | 188 | stand that will do us no harm */ |
@@ -215,7 +208,7 @@ int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
215 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); | 208 | ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); |
216 | 209 | ||
217 | fail: | 210 | fail: |
218 | mutex_unlock(&dev->struct_mutex); | 211 | mutex_unlock(&dev_priv->mmap_mutex); |
219 | switch (ret) { | 212 | switch (ret) { |
220 | case 0: | 213 | case 0: |
221 | case -ERESTARTSYS: | 214 | case -ERESTARTSYS: |
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 001b450b27b3..ff17af4cfc64 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -349,8 +349,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
349 | /* If we didn't get a handle then turn the cursor off */ | 349 | /* If we didn't get a handle then turn the cursor off */ |
350 | if (!handle) { | 350 | if (!handle) { |
351 | temp = CURSOR_MODE_DISABLE; | 351 | temp = CURSOR_MODE_DISABLE; |
352 | mutex_lock(&dev->struct_mutex); | ||
353 | |||
354 | if (gma_power_begin(dev, false)) { | 352 | if (gma_power_begin(dev, false)) { |
355 | REG_WRITE(control, temp); | 353 | REG_WRITE(control, temp); |
356 | REG_WRITE(base, 0); | 354 | REG_WRITE(base, 0); |
@@ -362,11 +360,9 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
362 | gt = container_of(gma_crtc->cursor_obj, | 360 | gt = container_of(gma_crtc->cursor_obj, |
363 | struct gtt_range, gem); | 361 | struct gtt_range, gem); |
364 | psb_gtt_unpin(gt); | 362 | psb_gtt_unpin(gt); |
365 | drm_gem_object_unreference(gma_crtc->cursor_obj); | 363 | drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj); |
366 | gma_crtc->cursor_obj = NULL; | 364 | gma_crtc->cursor_obj = NULL; |
367 | } | 365 | } |
368 | |||
369 | mutex_unlock(&dev->struct_mutex); | ||
370 | return 0; | 366 | return 0; |
371 | } | 367 | } |
372 | 368 | ||
@@ -376,7 +372,6 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
376 | return -EINVAL; | 372 | return -EINVAL; |
377 | } | 373 | } |
378 | 374 | ||
379 | mutex_lock(&dev->struct_mutex); | ||
380 | obj = drm_gem_object_lookup(dev, file_priv, handle); | 375 | obj = drm_gem_object_lookup(dev, file_priv, handle); |
381 | if (!obj) { | 376 | if (!obj) { |
382 | ret = -ENOENT; | 377 | ret = -ENOENT; |
@@ -441,17 +436,15 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc, | |||
441 | if (gma_crtc->cursor_obj) { | 436 | if (gma_crtc->cursor_obj) { |
442 | gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); | 437 | gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); |
443 | psb_gtt_unpin(gt); | 438 | psb_gtt_unpin(gt); |
444 | drm_gem_object_unreference(gma_crtc->cursor_obj); | 439 | drm_gem_object_unreference_unlocked(gma_crtc->cursor_obj); |
445 | } | 440 | } |
446 | 441 | ||
447 | gma_crtc->cursor_obj = obj; | 442 | gma_crtc->cursor_obj = obj; |
448 | unlock: | 443 | unlock: |
449 | mutex_unlock(&dev->struct_mutex); | ||
450 | return ret; | 444 | return ret; |
451 | 445 | ||
452 | unref_cursor: | 446 | unref_cursor: |
453 | drm_gem_object_unreference(obj); | 447 | drm_gem_object_unreference_unlocked(obj); |
454 | mutex_unlock(&dev->struct_mutex); | ||
455 | return ret; | 448 | return ret; |
456 | } | 449 | } |
457 | 450 | ||
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index ce015db59dc6..8f69225ce2b4 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -425,6 +425,7 @@ int psb_gtt_init(struct drm_device *dev, int resume) | |||
425 | 425 | ||
426 | if (!resume) { | 426 | if (!resume) { |
427 | mutex_init(&dev_priv->gtt_mutex); | 427 | mutex_init(&dev_priv->gtt_mutex); |
428 | mutex_init(&dev_priv->mmap_mutex); | ||
428 | psb_gtt_alloc(dev); | 429 | psb_gtt_alloc(dev); |
429 | } | 430 | } |
430 | 431 | ||
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e21726ecac32..3bd2c726dd61 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -465,6 +465,8 @@ struct drm_psb_private { | |||
465 | struct mutex gtt_mutex; | 465 | struct mutex gtt_mutex; |
466 | struct resource *gtt_mem; /* Our PCI resource */ | 466 | struct resource *gtt_mem; /* Our PCI resource */ |
467 | 467 | ||
468 | struct mutex mmap_mutex; | ||
469 | |||
468 | struct psb_mmu_driver *mmu; | 470 | struct psb_mmu_driver *mmu; |
469 | struct psb_mmu_pd *pf_pd; | 471 | struct psb_mmu_pd *pf_pd; |
470 | 472 | ||
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 30d4a5a495e2..8e1df1f7057c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -344,6 +344,8 @@ struct drm_file { | |||
344 | struct list_head event_list; | 344 | struct list_head event_list; |
345 | int event_space; | 345 | int event_space; |
346 | 346 | ||
347 | struct mutex event_read_lock; | ||
348 | |||
347 | struct drm_prime_file_private prime; | 349 | struct drm_prime_file_private prime; |
348 | }; | 350 | }; |
349 | 351 | ||
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 8cba54a2a0a0..a286cce98720 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -62,6 +62,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, | |||
62 | void drm_atomic_helper_cleanup_planes(struct drm_device *dev, | 62 | void drm_atomic_helper_cleanup_planes(struct drm_device *dev, |
63 | struct drm_atomic_state *old_state); | 63 | struct drm_atomic_state *old_state); |
64 | void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); | 64 | void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state); |
65 | void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, | ||
66 | bool atomic); | ||
65 | 67 | ||
66 | void drm_atomic_helper_swap_state(struct drm_device *dev, | 68 | void drm_atomic_helper_swap_state(struct drm_device *dev, |
67 | struct drm_atomic_state *state); | 69 | struct drm_atomic_state *state); |
@@ -81,6 +83,12 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set); | |||
81 | int __drm_atomic_helper_set_config(struct drm_mode_set *set, | 83 | int __drm_atomic_helper_set_config(struct drm_mode_set *set, |
82 | struct drm_atomic_state *state); | 84 | struct drm_atomic_state *state); |
83 | 85 | ||
86 | int drm_atomic_helper_disable_all(struct drm_device *dev, | ||
87 | struct drm_modeset_acquire_ctx *ctx); | ||
88 | struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev); | ||
89 | int drm_atomic_helper_resume(struct drm_device *dev, | ||
90 | struct drm_atomic_state *state); | ||
91 | |||
84 | int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, | 92 | int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc, |
85 | struct drm_property *property, | 93 | struct drm_property *property, |
86 | uint64_t val); | 94 | uint64_t val); |
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 08a8cac9e555..f9115aee43f4 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -222,6 +222,8 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, | |||
222 | const struct drm_display_mode *mode); | 222 | const struct drm_display_mode *mode); |
223 | bool drm_mode_equal(const struct drm_display_mode *mode1, | 223 | bool drm_mode_equal(const struct drm_display_mode *mode1, |
224 | const struct drm_display_mode *mode2); | 224 | const struct drm_display_mode *mode2); |
225 | bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, | ||
226 | const struct drm_display_mode *mode2); | ||
225 | bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, | 227 | bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, |
226 | const struct drm_display_mode *mode2); | 228 | const struct drm_display_mode *mode2); |
227 | 229 | ||
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 94938d89347c..c5576fbcb909 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -138,7 +138,7 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); | |||
138 | struct drm_modeset_acquire_ctx * | 138 | struct drm_modeset_acquire_ctx * |
139 | drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); | 139 | drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc); |
140 | 140 | ||
141 | int drm_modeset_lock_all_crtcs(struct drm_device *dev, | 141 | int drm_modeset_lock_all_ctx(struct drm_device *dev, |
142 | struct drm_modeset_acquire_ctx *ctx); | 142 | struct drm_modeset_acquire_ctx *ctx); |
143 | 143 | ||
144 | #endif /* DRM_MODESET_LOCK_H_ */ | 144 | #endif /* DRM_MODESET_LOCK_H_ */ |